diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 529460c6a..6f7720c2b 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -8,6 +8,7 @@ env:
SGX_WALLET_TAG: "1.83.0-beta.5"
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
PYTHON_VERSION: 3.11
+
jobs:
test_core:
runs-on: ubuntu-latest
@@ -15,40 +16,66 @@ jobs:
ETH_PRIVATE_KEY: ${{ secrets.ETH_PRIVATE_KEY }}
ENDPOINT: http://127.0.0.1:8545
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
- IMA_ENDPOINT: ${{ secrets.IMA_ENDPOINT }}
SCHAIN_TYPE: ${{ secrets.SCHAIN_TYPE }}
steps:
- uses: actions/checkout@v2
with:
submodules: true
+
- name: Set up Python ${{ env.PYTHON_VERSION }}
uses: actions/setup-python@v1
with:
python-version: ${{ env.PYTHON_VERSION }}
+
- name: Install python dependencies
run: bash ./scripts/install_python_dependencies.sh
+
- name: Lint with flake8
run: flake8 .
+
- name: Launch anvil node
run: |
docker run -d --network host --name anvil ghcr.io/foundry-rs/foundry anvil && sleep 5 && docker logs anvil --tail 1000
+
- name: Deploy manager & ima contracts
run: |
bash ./helper-scripts/deploy_test_ima.sh
+
+    - name: Cleanup skale-manager image
+ run: |
docker rmi -f skalenetwork/skale-manager:${{ env.MANAGER_TAG }}
+
- name: Show stats before tests
if: always()
run: |
sudo lsblk -f
sudo free -h
+
- name: Run core tests
run: |
bash ./scripts/run_core_tests.sh
- - name: Show stats after tests
+
+ - name: Cleanup docker artifacts
+ run: |
+ docker rm -f $(docker ps -aq)
+ docker rmi -f $(docker images -q)
+
+ - name: Show stats after core tests
+ if: always()
+ run: |
+ sudo lsblk -f
+ sudo free -h
+
+ - name: Run firewall tests
+ run: |
+ bash ./scripts/run_firewall_test.sh
+
+ - name: Show stats after firewall tests
if: always()
run: |
sudo lsblk -f
sudo free -h
+
- name: Run codecov
run: |
codecov -t $CODECOV_TOKEN
diff --git a/.gitignore b/.gitignore
index 1f51a10dc..7667e9797 100644
--- a/.gitignore
+++ b/.gitignore
@@ -63,6 +63,8 @@ schain_status_sample.json
*.lock
skale-manager-*
+manager.json
+meta.json
tests/skale-data/contracts_info/ima.json
tests/skale-data/contracts_info/schain_ima_abi.json
diff --git a/VERSION b/VERSION
index 6a6a3d8e3..24ba9a38d 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-2.6.1
+2.7.0
diff --git a/admin.py b/admin.py
index 0a70ee312..4ecf5923b 100644
--- a/admin.py
+++ b/admin.py
@@ -27,7 +27,7 @@
from core.schains.process_manager import run_process_manager
from core.schains.cleaner import run_cleaner
from core.updates import soft_updates
-from core.filebeat import update_filebeat_service
+from core.monitoring import update_monitoring_services
from tools.configs import BACKUP_RUN, INIT_LOCK_PATH, PULL_CONFIG_FOR_SCHAIN
from tools.configs.web3 import (
@@ -82,7 +82,7 @@ def worker():
skale_ima = SkaleIma(ENDPOINT, MAINNET_IMA_ABI_FILEPATH, wallet)
if BACKUP_RUN:
logger.info('Running sChains in snapshot download mode')
- update_filebeat_service(node_config.ip, node_config.id, skale)
+ update_monitoring_services(node_config.ip, node_config.id, skale)
monitor(skale, skale_ima, node_config)
diff --git a/app.py b/app.py
index b4495af7f..9fbdf4f5b 100644
--- a/app.py
+++ b/app.py
@@ -35,10 +35,10 @@
FLASK_DEBUG_MODE
)
from tools.configs.web3 import ENDPOINT
-from tools.db import get_database, REDIS_URI
from tools.docker_utils import DockerUtils
from tools.helper import wait_until_admin_inited
from tools.logger import init_api_logger
+from tools.resources import get_database, REDIS_URI
from tools.str_formatters import arguments_list_string
from web.routes.node import node_bp
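
Note: `tools.db` is folded into a new `tools.resources` module that also provides the statsd client used by the checks and DKG code below. That module is not part of this diff; a minimal sketch of the assumed shape, built on the `statsd` PyPI package (host/port defaults are assumptions, not the PR's actual values):

import statsd

STATSD_HOST = '127.0.0.1'  # assumed default; the real module may read env vars
STATSD_PORT = 8125


def get_statsd_client() -> statsd.StatsClient:
    # statsd.StatsClient is the standard client from the `statsd` package
    return statsd.StatsClient(STATSD_HOST, STATSD_PORT)
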
diff --git a/core/filebeat.py b/core/filebeat.py
deleted file mode 100644
index 21635038b..000000000
--- a/core/filebeat.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# This file is part of SKALE Admin
-#
-# Copyright (C) 2020 SKALE Labs
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Affero General Public License for more details.
-#
-# You should have received a copy of the GNU Affero General Public License
-# along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-import logging
-from typing import Optional
-
-from tools.helper import process_template
-from tools.docker_utils import DockerUtils
-from tools.str_formatters import arguments_list_string
-
-from tools.configs.filebeat import (FILEBEAT_TEMPLATE_PATH, FILEBEAT_CONTAINER_NAME,
- FILEBEAT_CONFIG_PATH)
-
-logger = logging.getLogger(__name__)
-
-
-def update_filebeat_service(node_ip, node_id, skale, dutils: Optional[DockerUtils] = None):
- logger.info('Going to update filebeat service...')
- dutils = dutils or DockerUtils()
- contract_address = skale.manager.address
- template_data = {
- 'ip': node_ip,
- 'id': node_id,
- 'contract_address': contract_address
- }
- logger.info(arguments_list_string(
- {'Node ID': node_id, 'Node IP': node_ip, 'Manager contract address': contract_address},
- 'Processing Filebeat template'))
- process_template(FILEBEAT_TEMPLATE_PATH, FILEBEAT_CONFIG_PATH, template_data)
- filebeat_container = dutils.client.containers.get(FILEBEAT_CONTAINER_NAME)
- filebeat_container.restart()
- logger.info('Filebeat service file updated, service restarted')
-
-
-def filebeat_config_processed() -> bool:
- with open(FILEBEAT_CONFIG_PATH) as f:
- return 'id: ' in f.read()
diff --git a/core/monitoring.py b/core/monitoring.py
new file mode 100644
index 000000000..9aa87dcf0
--- /dev/null
+++ b/core/monitoring.py
@@ -0,0 +1,121 @@
+# -*- coding: utf-8 -*-
+#
+# This file is part of SKALE Admin
+#
+# Copyright (C) 2020 SKALE Labs
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+import logging
+from typing import Optional
+
+from tools.helper import process_template
+from tools.docker_utils import DockerUtils
+
+from tools.configs import SKALE_DIR_HOST
+from tools.configs.monitoring import (
+ FILEBEAT_TEMPLATE_PATH, FILEBEAT_CONTAINER_NAME,
+ FILEBEAT_CONFIG_PATH,
+ INFLUX_URL,
+ TELEGRAF,
+ TELEGRAF_CONTAINER_NAME, TELEGRAF_IMAGE,
+ TELEGRAF_TEMPLATE_PATH,
+ TELEGRAF_CONFIG_PATH,
+ TELEGRAF_MEM_LIMIT
+)
+
+logger = logging.getLogger(__name__)
+
+
+class TelegrafNotConfiguredError(Exception):
+ pass
+
+
+def update_filebeat_service(node_ip, node_id, skale, dutils: Optional[DockerUtils] = None):
+ dutils = dutils or DockerUtils()
+ contract_address = skale.manager.address
+ template_data = {
+ 'ip': node_ip,
+ 'id': node_id,
+ 'contract_address': contract_address
+ }
+
+ logger.info('Configuring filebeat %s', template_data)
+ process_template(FILEBEAT_TEMPLATE_PATH, FILEBEAT_CONFIG_PATH, template_data)
+ filebeat_container = dutils.client.containers.get(FILEBEAT_CONTAINER_NAME)
+ filebeat_container.restart()
+    logger.info('Filebeat config updated, filebeat restarted')
+
+
+def filebeat_config_processed() -> bool:
+ with open(FILEBEAT_CONFIG_PATH) as f:
+ return 'id: ' in f.read()
+
+
+def ensure_telegraf_running(dutils: Optional[DockerUtils] = None) -> None:
+ dutils = dutils or DockerUtils()
+ if dutils.is_container_exists(TELEGRAF_CONTAINER_NAME):
+ dutils.restart(TELEGRAF_CONTAINER_NAME)
+ else:
+ dutils.run_container(
+ image_name=TELEGRAF_IMAGE,
+ name=TELEGRAF_CONTAINER_NAME,
+ network_mode='host',
+ user='telegraf:998',
+ restart_policy={'name': 'on-failure'},
+ environment={'HOST_PROC': '/host/proc'},
+ volumes={
+ '/proc': {'bind': '/host/proc', 'mode': 'ro'},
+ f'{SKALE_DIR_HOST}/config/telegraf.conf': {'bind': '/etc/telegraf/telegraf.conf', 'mode': 'ro'}, # noqa
+ f'{SKALE_DIR_HOST}/node_data/telegraf': {'bind': '/var/lib/telegraf', 'mode': 'rw'},
+ '/var/run/skale/': {'bind': '/var/run/skale', 'mode': 'rw'}
+ },
+ mem_limit=TELEGRAF_MEM_LIMIT
+ )
+
+
+def update_telegraf_service(
+ node_ip: str,
+ node_id: int,
+ url: str = INFLUX_URL,
+ dutils: Optional[DockerUtils] = None
+) -> None:
+ dutils = dutils or DockerUtils()
+ template_data = {
+ 'ip': node_ip,
+ 'node_id': str(node_id),
+ 'url': url
+ }
+ missing = list(filter(lambda k: not template_data[k], template_data))
+
+ if missing:
+        emsg = f'TELEGRAF=True is set, but missing options: {missing}'
+ raise TelegrafNotConfiguredError(emsg)
+
+ logger.info('Configuring telegraf %s', template_data)
+ process_template(TELEGRAF_TEMPLATE_PATH, TELEGRAF_CONFIG_PATH, template_data)
+
+ ensure_telegraf_running(dutils)
+ logger.info('Telegraf config updated, telegraf restarted')
+
+
+def telegraf_config_processed() -> bool:
+ with open(TELEGRAF_CONFIG_PATH) as f:
+ return 'id: ' in f.read()
+
+
+def update_monitoring_services(node_ip, node_id, skale, dutils: Optional[DockerUtils] = None):
+ update_filebeat_service(node_ip, node_id, skale, dutils=dutils)
+ if TELEGRAF:
+ update_telegraf_service(node_ip, node_id, dutils=dutils)
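
Usage sketch for the new entrypoint (illustrative values): `update_monitoring_services` always refreshes filebeat, and when `TELEGRAF` is enabled it validates the telegraf options first, raising `TelegrafNotConfiguredError` if any of them is empty:

from core.monitoring import TelegrafNotConfiguredError, update_telegraf_service

try:
    # url='' simulates a missing INFLUX_URL
    update_telegraf_service(node_ip='1.2.3.4', node_id=7, url='')
except TelegrafNotConfiguredError as e:
    print(f'telegraf is not configured: {e}')
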
diff --git a/core/node.py b/core/node.py
index b6979557c..72df5bf00 100644
--- a/core/node.py
+++ b/core/node.py
@@ -37,7 +37,7 @@
from skale.utils.helper import ip_from_bytes
from skale.utils.web3_utils import public_key_to_address, to_checksum_address
-from core.filebeat import update_filebeat_service
+from core.monitoring import update_monitoring_services
from tools.configs import WATCHDOG_PORT, CHANGE_IP_DELAY, CHECK_REPORT_PATH, META_FILEPATH
from tools.helper import read_json
from tools.str_formatters import arguments_list_string
@@ -135,7 +135,7 @@ def register(self, ip, public_ip, port, name, domain_name,
self.config.name = name
self.config.ip = ip
- update_filebeat_service(public_ip, self.config.id, self.skale)
+ update_monitoring_services(public_ip, self.config.id, self.skale)
return self._ok(data=self.config.all())
def create_node_on_contracts(self, ip, public_ip, port, name, domain_name,
diff --git a/core/node_config.py b/core/node_config.py
index 9d826886d..189498150 100644
--- a/core/node_config.py
+++ b/core/node_config.py
@@ -17,80 +17,54 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
-import functools
import logging
-from filelock import FileLock
-from tools.helper import read_json, write_json, init_file
-from tools.configs import NODE_CONFIG_FILEPATH, NODE_CONFIG_LOCK_PATH
+from tools.configs import NODE_CONFIG_FILEPATH
+from tools.json_object import JsonObject
logger = logging.getLogger(__name__)
-def config_setter(func):
- @functools.wraps(func)
- def wrapper_decorator(*args, **kwargs):
- field_name, field_value = func(*args, **kwargs)
- lock = FileLock(NODE_CONFIG_LOCK_PATH)
- with lock:
- config = read_json(NODE_CONFIG_FILEPATH)
- config[field_name] = field_value
- write_json(NODE_CONFIG_FILEPATH, config)
- return wrapper_decorator
-
-
-def config_getter(func):
- @functools.wraps(func)
- def wrapper_decorator(*args, **kwargs):
- field_name = func(*args, **kwargs)
- config = read_json(NODE_CONFIG_FILEPATH)
- return config.get(field_name)
- return wrapper_decorator
-
-
-class NodeConfig:
+class NodeConfig(JsonObject):
def __init__(self, filepath: str = NODE_CONFIG_FILEPATH):
- init_file(filepath, {})
+ super().__init__(filepath=filepath)
@property
- @config_getter
def id(self) -> int:
- return 'node_id'
+ return self._get('node_id')
@id.setter
- @config_setter
def id(self, node_id: int) -> None:
- return 'node_id', node_id
+ return self._set('node_id', node_id)
@property
- @config_getter
def ip(self) -> str:
- return 'node_ip'
+ return self._get('node_ip')
@ip.setter
- @config_setter
def ip(self, ip: str) -> None:
- return 'node_ip', ip
+ return self._set('node_ip', ip)
@property
- @config_getter
def name(self) -> str:
- return 'name'
+ return self._get('name')
@name.setter
- @config_setter
def name(self, node_name: str) -> None:
- return 'name', node_name
+ return self._set('name', node_name)
@property
- @config_getter
def sgx_key_name(self) -> int:
- return 'sgx_key_name'
+ return self._get('sgx_key_name')
@sgx_key_name.setter
- @config_setter
def sgx_key_name(self, sgx_key_name: int) -> None:
- return 'sgx_key_name', sgx_key_name
+ return self._set('sgx_key_name', sgx_key_name)
+
+ @property
+ def schain_base_port(self) -> int:
+ return self._get('schain_base_port') or -1
- def all(self) -> dict:
- return read_json(NODE_CONFIG_FILEPATH)
+ @schain_base_port.setter
+ def schain_base_port(self, schain_port: int) -> None:
+ return self._set('schain_base_port', schain_port)
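
`tools.json_object.JsonObject` replaces the removed getter/setter decorators but is not shown in this diff; a minimal sketch of the assumed semantics, keeping the file locking the decorators had (the lock path is an assumption):

from filelock import FileLock

from tools.helper import init_file, read_json, write_json


class JsonObject:
    def __init__(self, filepath: str):
        self.filepath = filepath
        init_file(filepath, {})

    def _get(self, field: str):
        return read_json(self.filepath).get(field)

    def _set(self, field: str, value) -> None:
        with FileLock(self.filepath + '.lock'):  # assumed lock location
            config = read_json(self.filepath)
            config[field] = value
            write_json(self.filepath, config)
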
diff --git a/core/schains/checks.py b/core/schains/checks.py
index ec93d4b31..8f4b0d9ee 100644
--- a/core/schains/checks.py
+++ b/core/schains/checks.py
@@ -22,36 +22,45 @@
import time
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Optional
-from core.node import ExtendedManagerNodeInfo, get_current_ips
+import statsd
+
+from core.node import ExtendedManagerNodeInfo, get_current_ips
from core.schains.config.directory import get_schain_check_filepath
from core.schains.config.file_manager import ConfigFileManager
from core.schains.config.helper import (
get_base_port_from_config,
get_node_ips_from_config,
get_own_ip_from_config,
- get_local_schain_http_endpoint_from_config
+ get_local_schain_http_endpoint_from_config,
)
from core.schains.config.main import (
get_skaled_config_rotations_ids,
- get_upstream_config_rotation_ids
+ get_upstream_config_rotation_ids,
)
from core.schains.dkg.utils import get_secret_key_share_filepath
from core.schains.firewall.types import IRuleController
-from core.schains.ima import get_migration_ts as get_ima_migration_ts
+from core.schains.ima import get_ima_time_frame, get_migration_ts as get_ima_migration_ts
from core.schains.process_manager_helper import is_monitor_process_alive
from core.schains.rpc import (
check_endpoint_alive,
check_endpoint_blocks,
- get_endpoint_alive_check_timeout
+ get_endpoint_alive_check_timeout,
)
from core.schains.external_config import ExternalConfig, ExternalState
-from core.schains.runner import get_container_name, get_image_name, is_new_image_pulled
+from core.schains.runner import (
+ get_container_name,
+ get_ima_container_time_frame,
+ get_image_name,
+ is_new_image_pulled,
+)
from core.schains.skaled_exit_codes import SkaledExitCodes
+from core.schains.volume import is_volume_exists
from tools.configs.containers import IMA_CONTAINER, SCHAIN_CONTAINER
from tools.docker_utils import DockerUtils
-from tools.helper import write_json
+from tools.helper import no_hyphens, write_json
+from tools.resources import get_statsd_client
from tools.str_formatters import arguments_list_string
from web.models.schain import SChainRecord
@@ -70,7 +79,7 @@
'rpc',
'blocks',
'process',
- 'ima_container'
+ 'ima_container',
]
TG_ALLOWED_CHECKS = [
@@ -81,7 +90,7 @@
'rpc',
'blocks',
'process',
- 'ima_container'
+ 'ima_container',
]
@@ -102,10 +111,13 @@ class IChecks(ABC):
def get_name(self) -> str:
pass
- def get_all(self,
- log: bool = True,
- save: bool = False,
- needed: Optional[List[str]] = None) -> Dict:
+ def get_all(
+ self,
+ log: bool = True,
+ save: bool = False,
+ expose: bool = False,
+ needed: Optional[List[str]] = None,
+ ) -> Dict:
if needed:
names = needed
else:
@@ -116,6 +128,8 @@ def get_all(self,
if hasattr(self, name):
logger.debug('Running check %s', name)
checks_status[name] = getattr(self, name).status
+ if expose:
+ send_to_statsd(self.statsd_client, self.get_name(), checks_status)
if log:
log_checks_dict(self.get_name(), checks_status)
if save:
@@ -128,24 +142,27 @@ def is_healthy(self) -> bool:
@classmethod
def get_check_names(cls):
- return list(filter(
- lambda c: not c.startswith('_') and isinstance(
- getattr(cls, c), property),
- dir(cls)
- ))
+ return list(
+ filter(
+ lambda c: not c.startswith('_') and isinstance(getattr(cls, c), property), dir(cls)
+ )
+ )
class ConfigChecks(IChecks):
- def __init__(self,
- schain_name: str,
- node_id: int,
- schain_record: SChainRecord,
- rotation_id: int,
- stream_version: str,
- current_nodes: list[ExtendedManagerNodeInfo],
- estate: ExternalState,
- econfig: Optional[ExternalConfig] = None
- ) -> None:
+ def __init__(
+ self,
+ schain_name: str,
+ node_id: int,
+ schain_record: SChainRecord,
+ rotation_id: int,
+ stream_version: str,
+ current_nodes: list[ExtendedManagerNodeInfo],
+ estate: ExternalState,
+ last_dkg_successful: bool,
+ sync_node: bool = False,
+ econfig: Optional[ExternalConfig] = None,
+ ) -> None:
self.name = schain_name
self.node_id = node_id
self.schain_record = schain_record
@@ -153,10 +170,11 @@ def __init__(self,
self.stream_version = stream_version
self.current_nodes = current_nodes
self.estate = estate
+ self._last_dkg_successful = last_dkg_successful
+ self.sync_node = sync_node
self.econfig = econfig or ExternalConfig(schain_name)
- self.cfm: ConfigFileManager = ConfigFileManager(
- schain_name=schain_name
- )
+ self.cfm: ConfigFileManager = ConfigFileManager(schain_name=schain_name)
+ self.statsd_client = get_statsd_client()
def get_name(self) -> str:
return self.name
@@ -167,13 +185,15 @@ def config_dir(self) -> CheckRes:
dir_path = self.cfm.dirname
return CheckRes(os.path.isdir(dir_path))
+ @property
+ def last_dkg_successful(self) -> CheckRes:
+ """Checks that last dkg was successfuly completed"""
+ return CheckRes(self._last_dkg_successful)
+
@property
def dkg(self) -> CheckRes:
"""Checks that DKG procedure is completed"""
- secret_key_share_filepath = get_secret_key_share_filepath(
- self.name,
- self.rotation_id
- )
+ secret_key_share_filepath = get_secret_key_share_filepath(self.name, self.rotation_id)
return CheckRes(os.path.isfile(secret_key_share_filepath))
@property
@@ -212,17 +232,14 @@ def upstream_config(self) -> CheckRes:
exists,
node_ips_updated,
stream_updated,
- triggered
+ triggered,
)
return CheckRes(exists and node_ips_updated and stream_updated and not triggered)
@property
def external_state(self) -> CheckRes:
actual_state = self.econfig.get()
- logger.debug(
- 'Checking external config. Current %s. Saved %s',
- self.estate, actual_state
- )
+ logger.debug('Checking external config. Current %s. Saved %s', self.estate, actual_state)
return CheckRes(self.econfig.synced(self.estate))
@@ -234,17 +251,18 @@ def __init__(
rule_controller: IRuleController,
*,
econfig: Optional[ExternalConfig] = None,
- dutils: Optional[DockerUtils] = None
+ dutils: Optional[DockerUtils] = None,
+ sync_node: bool = False,
):
self.name = schain_name
self.schain_record = schain_record
self.dutils = dutils or DockerUtils()
self.container_name = get_container_name(SCHAIN_CONTAINER, self.name)
self.econfig = econfig or ExternalConfig(name=schain_name)
+ self.sync_node = sync_node
self.rc = rule_controller
- self.cfm: ConfigFileManager = ConfigFileManager(
- schain_name=schain_name
- )
+ self.cfm: ConfigFileManager = ConfigFileManager(schain_name=schain_name)
+ self.statsd_client = get_statsd_client()
def get_name(self) -> str:
return self.name
@@ -260,9 +278,7 @@ def rotation_id_updated(self) -> CheckRes:
upstream_rotations = get_upstream_config_rotation_ids(self.cfm)
config_rotations = get_skaled_config_rotations_ids(self.cfm)
logger.debug(
- 'Comparing rotation_ids. Upstream: %s. Config: %s',
- upstream_rotations,
- config_rotations
+ 'Comparing rotation_ids. Upstream: %s. Config: %s', upstream_rotations, config_rotations
)
return CheckRes(upstream_rotations == config_rotations)
@@ -274,13 +290,14 @@ def config_updated(self) -> CheckRes:
@property
def config(self) -> CheckRes:
- """ Checks that sChain config file exists """
+ """Checks that sChain config file exists"""
return CheckRes(self.cfm.skaled_config_exists())
@property
def volume(self) -> CheckRes:
"""Checks that sChain volume exists"""
- return CheckRes(self.dutils.is_data_volume_exists(self.name))
+
+ return CheckRes(is_volume_exists(self.name, sync_node=self.sync_node, dutils=self.dutils))
@property
def firewall_rules(self) -> CheckRes:
@@ -292,10 +309,7 @@ def firewall_rules(self) -> CheckRes:
own_ip = get_own_ip_from_config(conf)
ranges = self.econfig.ranges
self.rc.configure(
- base_port=base_port,
- own_ip=own_ip,
- node_ips=node_ips,
- sync_ip_ranges=ranges
+ base_port=base_port, own_ip=own_ip, node_ips=node_ips, sync_ip_ranges=ranges
)
logger.debug(f'Rule controller {self.rc.expected_rules()}')
return CheckRes(self.rc.is_rules_synced())
@@ -321,30 +335,39 @@ def ima_container(self) -> CheckRes:
if not self.econfig.ima_linked:
return CheckRes(True)
container_name = get_container_name(IMA_CONTAINER, self.name)
- new_image_pulled = is_new_image_pulled(
- type=IMA_CONTAINER, dutils=self.dutils)
+ new_image_pulled = is_new_image_pulled(image_type=IMA_CONTAINER, dutils=self.dutils)
migration_ts = get_ima_migration_ts(self.name)
- new = time.time() > migration_ts
+ after = time.time() > migration_ts
container_running = self.dutils.is_container_running(container_name)
- updated_image = False
+ updated_image, updated_time_frame = False, False
if container_running:
- expected_image = get_image_name(type=IMA_CONTAINER, new=new)
+ expected_image = get_image_name(image_type=IMA_CONTAINER, new=after)
image = self.dutils.get_container_image_name(container_name)
updated_image = image == expected_image
+ time_frame = get_ima_time_frame(self.name, after=after)
+ container_time_frame = get_ima_container_time_frame(self.name, self.dutils)
+
+ updated_time_frame = time_frame == container_time_frame
+ logger.debug(
+ 'IMA image %s, container image %s, time frame %d, container_time_frame %d',
+ expected_image,
+ image,
+ time_frame,
+ container_time_frame,
+ )
+
data = {
'container_running': container_running,
'updated_image': updated_image,
- 'new_image_pulled': new_image_pulled
+ 'new_image_pulled': new_image_pulled,
+ 'updated_time_frame': updated_time_frame,
}
- logger.debug(
- '%s, IMA check - %s',
- self.name, data
- )
- result: bool = container_running and updated_image and new_image_pulled
+ logger.debug('%s, IMA check - %s', self.name, data)
+ result: bool = all(data.values())
return CheckRes(result, data=data)
@property
@@ -354,9 +377,7 @@ def rpc(self) -> CheckRes:
if self.config:
config = self.cfm.skaled_config
http_endpoint = get_local_schain_http_endpoint_from_config(config)
- timeout = get_endpoint_alive_check_timeout(
- self.schain_record.failed_rpc_count
- )
+ timeout = get_endpoint_alive_check_timeout(self.schain_record.failed_rpc_count)
res = check_endpoint_alive(http_endpoint, timeout=timeout)
return CheckRes(res)
@@ -393,10 +414,12 @@ def __init__(
stream_version: str,
estate: ExternalState,
current_nodes: list[ExtendedManagerNodeInfo],
+ last_dkg_successful: bool,
rotation_id: int = 0,
*,
econfig: Optional[ExternalConfig] = None,
- dutils: DockerUtils = None
+ dutils: DockerUtils = None,
+ sync_node: bool = False,
):
self._subjects = [
ConfigChecks(
@@ -406,16 +429,19 @@ def __init__(
rotation_id=rotation_id,
stream_version=stream_version,
current_nodes=current_nodes,
+ last_dkg_successful=last_dkg_successful,
estate=estate,
- econfig=econfig
+ econfig=econfig,
+ sync_node=sync_node,
),
SkaledChecks(
schain_name=schain_name,
schain_record=schain_record,
rule_controller=rule_controller,
econfig=econfig,
- dutils=dutils
- )
+ dutils=dutils,
+ sync_node=sync_node,
+ ),
]
def __getattr__(self, attr: str) -> Any:
@@ -433,11 +459,7 @@ def get_all(self, log: bool = True, save: bool = False, needed: Optional[List[st
plain_checks = {}
for subj in self._subjects:
logger.debug('Running checks for %s', subj)
- subj_checks = subj.get_all(
- log=False,
- save=False,
- needed=needed
- )
+ subj_checks = subj.get_all(log=False, save=False, needed=needed)
plain_checks.update(subj_checks)
if not self.estate or not self.estate.ima_linked:
if 'ima_container' in plain_checks:
@@ -456,13 +478,9 @@ def get_api_checks_status(status: Dict, allowed: List = API_ALLOWED_CHECKS) -> D
def save_checks_dict(schain_name, checks_dict):
schain_check_path = get_schain_check_filepath(schain_name)
- logger.info(
- f'Saving checks for the chain {schain_name}: {schain_check_path}')
+ logger.info(f'Saving checks for the chain {schain_name}: {schain_check_path}')
try:
- write_json(schain_check_path, {
- 'time': time.time(),
- 'checks': checks_dict
- })
+ write_json(schain_check_path, {'time': time.time(), 'checks': checks_dict})
except Exception:
logger.exception(f'Failed to save checks: {schain_check_path}')
@@ -474,13 +492,17 @@ def log_checks_dict(schain_name, checks_dict):
if not checks_dict[check]:
failed_checks.append(check)
if len(failed_checks) != 0:
- failed_checks_str = ", ".join(failed_checks)
+ failed_checks_str = ', '.join(failed_checks)
logger.info(
arguments_list_string(
- {
- 'sChain name': schain_name,
- 'Failed checks': failed_checks_str
- },
- 'Failed sChain checks', 'error'
+ {'sChain name': schain_name, 'Failed checks': failed_checks_str},
+ 'Failed sChain checks',
+ 'error',
)
)
+
+
+def send_to_statsd(statsd_client: statsd.StatsClient, schain_name: str, checks_dict: dict) -> None:
+ for check, result in checks_dict.items():
+ mname = f'admin.schain_checks.{check}.{no_hyphens(schain_name)}'
+ statsd_client.gauge(mname, int(result))
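
Usage sketch for the new statsd reporting path (assumes a statsd server on the default port and that `no_hyphens` replaces '-' with '_'):

import statsd

from core.schains.checks import send_to_statsd

client = statsd.StatsClient('127.0.0.1', 8125)
send_to_statsd(client, 'my-chain', {'config_dir': True, 'dkg': False})
# Emits gauges:
#   admin.schain_checks.config_dir.my_chain = 1
#   admin.schain_checks.dkg.my_chain = 0
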
diff --git a/core/schains/cleaner.py b/core/schains/cleaner.py
index 62c8ca1ab..985089db6 100644
--- a/core/schains/cleaner.py
+++ b/core/schains/cleaner.py
@@ -41,7 +41,7 @@
from core.schains.types import ContainerType
from core.schains.firewall.utils import get_sync_agent_ranges
-from tools.configs import SGX_CERTIFICATES_FOLDER
+from tools.configs import SGX_CERTIFICATES_FOLDER, SYNC_NODE
from tools.configs.schains import SCHAINS_DIR_PATH
from tools.configs.containers import (
SCHAIN_CONTAINER, IMA_CONTAINER, SCHAIN_STOP_TIMEOUT
@@ -224,6 +224,7 @@ def cleanup_schain(
schain_name,
sync_agent_ranges,
rotation_id,
+ last_dkg_successful,
current_nodes,
estate,
dutils=None
@@ -245,7 +246,9 @@ def cleanup_schain(
current_nodes=current_nodes,
rotation_id=rotation_id,
estate=estate,
- dutils=dutils
+ last_dkg_successful=last_dkg_successful,
+ dutils=dutils,
+ sync_node=SYNC_NODE
)
status = checks.get_all()
if status['skaled_container'] or is_exited(
diff --git a/core/schains/cmd.py b/core/schains/cmd.py
index 25e875285..c96e2432e 100644
--- a/core/schains/cmd.py
+++ b/core/schains/cmd.py
@@ -25,7 +25,7 @@
from tools.configs import SGX_SERVER_URL
from tools.configs.containers import DATA_DIR_CONTAINER_PATH, SHARED_SPACE_CONTAINER_PATH
-from tools.configs.ima import IMA_ENDPOINT
+from tools.configs.web3 import ENDPOINT
def get_schain_container_cmd(
@@ -33,10 +33,11 @@ def get_schain_container_cmd(
start_ts: int = None,
download_snapshot: bool = False,
enable_ssl: bool = True,
+ sync_node: bool = False,
snapshot_from: str = ''
) -> str:
"""Returns parameters that will be passed to skaled binary in the sChain container"""
- opts = get_schain_container_base_opts(schain_name, enable_ssl=enable_ssl)
+ opts = get_schain_container_base_opts(schain_name, enable_ssl=enable_ssl, sync_node=sync_node)
if snapshot_from:
opts.extend(['--no-snapshot-majority', snapshot_from])
if download_snapshot:
@@ -54,13 +55,17 @@ def get_schain_container_sync_opts(start_ts: int = None) -> list:
return sync_opts
-def get_schain_container_base_opts(schain_name: str,
- enable_ssl: bool = True) -> list:
+def get_schain_container_base_opts(
+ schain_name: str,
+ enable_ssl: bool = True,
+ sync_node: bool = False
+) -> list:
config_filepath = get_skaled_container_config_path(schain_name)
ssl_key, ssl_cert = get_ssl_filepath()
config = ConfigFileManager(schain_name=schain_name).skaled_config
ports = get_schain_ports_from_config(config)
static_schain_cmd = get_static_schain_cmd()
+
cmd = [
f'--config {config_filepath}',
f'-d {DATA_DIR_CONTAINER_PATH}',
@@ -69,11 +74,15 @@ def get_schain_container_base_opts(schain_name: str,
f'--https-port {ports["https"]}',
f'--ws-port {ports["ws"]}',
f'--wss-port {ports["wss"]}',
- f'--sgx-url {SGX_SERVER_URL}',
- f'--shared-space-path {SHARED_SPACE_CONTAINER_PATH}/data',
- f'--main-net-url {IMA_ENDPOINT}'
+ f'--main-net-url {ENDPOINT}'
]
+ if not sync_node:
+ cmd.extend([
+ f'--sgx-url {SGX_SERVER_URL}',
+ f'--shared-space-path {SHARED_SPACE_CONTAINER_PATH}/data'
+ ])
+
if static_schain_cmd:
cmd.extend(static_schain_cmd)
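
Behavioural sketch of the new sync_node flag (illustrative; requires an existing skaled config on disk): sync nodes drop the SGX and shared-space options, and both modes now pass ENDPOINT to --main-net-url instead of the removed IMA_ENDPOINT:

from core.schains.cmd import get_schain_container_base_opts

regular = get_schain_container_base_opts('my-chain')
sync = get_schain_container_base_opts('my-chain', sync_node=True)
assert any(opt.startswith('--sgx-url') for opt in regular)
assert not any(opt.startswith('--sgx-url') for opt in sync)
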
diff --git a/core/schains/config/generator.py b/core/schains/config/generator.py
index 1992af6e5..fe4eef631 100644
--- a/core/schains/config/generator.py
+++ b/core/schains/config/generator.py
@@ -22,22 +22,28 @@
from skale import Skale
from skale.schain_config.generator import get_schain_nodes_with_schains
+from skale.schain_config.ports_allocation import get_schain_base_port_on_node
from skale.schain_config.rotation_history import get_previous_schain_groups
from etherbase_predeployed import ETHERBASE_ADDRESS
from marionette_predeployed import MARIONETTE_ADDRESS
+from core.node_config import NodeConfig
from core.schains.config.skale_manager_opts import SkaleManagerOpts, init_skale_manager_opts
from core.schains.config.skale_section import SkaleConfig, generate_skale_section
from core.schains.config.predeployed import generate_predeployed_accounts
from core.schains.config.precompiled import generate_precompiled_accounts
from core.schains.config.generation import Gen
+from core.schains.config.static_accounts import is_static_accounts, static_accounts
from core.schains.config.helper import get_chain_id, get_schain_id
+from core.schains.dkg.utils import get_common_bls_public_key
from core.schains.limits import get_schain_type
from tools.helper import read_json
from tools.configs.schains import BASE_SCHAIN_CONFIG_FILEPATH
from tools.helper import is_zero_address, is_address_contract
+from tools.node_options import NodeOptions
+
logger = logging.getLogger(__name__)
@@ -125,16 +131,21 @@ def get_schain_originator(schain: dict):
def generate_schain_config(
schain: dict, node_id: int, node: dict, ecdsa_key_name: str,
- schains_on_node: list, rotation_id: int, schain_nodes_with_schains: list,
+ rotation_id: int, schain_nodes_with_schains: list,
node_groups: list, generation: int, is_owner_contract: bool,
- skale_manager_opts: SkaleManagerOpts
+ skale_manager_opts: SkaleManagerOpts, schain_base_port: int, common_bls_public_keys: list[str],
+ sync_node: bool = False,
+ archive=None, catchup=None
) -> SChainConfig:
"""Main function that is used to generate sChain config"""
logger.info(
f'Going to generate sChain config for {schain["name"]}, '
- f'node_name: {node["name"]}, node_id: {node_id}, rotation_id: {rotation_id}, '
- f'ecdsa keyname: {ecdsa_key_name}'
+ f'node_name: {node["name"]}, node_id: {node_id}, rotation_id: {rotation_id}'
)
+ if sync_node:
+ logger.info(f'Sync node config options: archive: {archive}, catchup: {catchup}')
+ else:
+ logger.info(f'Regular node config options: ecdsa keyname: {ecdsa_key_name}')
on_chain_etherbase = get_on_chain_etherbase(schain, generation)
on_chain_owner = get_on_chain_owner(schain, generation, is_owner_contract)
@@ -151,20 +162,6 @@ def generate_schain_config(
originator_address = get_schain_originator(schain)
- predeployed_accounts = generate_predeployed_accounts(
- schain_name=schain['name'],
- schain_type=schain_type,
- schain_nodes=schain_nodes_with_schains,
- on_chain_owner=on_chain_owner,
- mainnet_owner=mainnet_owner,
- originator_address=originator_address,
- generation=generation
- )
-
- precompiled_accounts = generate_precompiled_accounts(
- on_chain_owner=on_chain_owner
- )
-
skale_config = generate_skale_section(
schain=schain,
on_chain_etherbase=on_chain_etherbase,
@@ -173,13 +170,41 @@ def generate_schain_config(
node_id=node_id,
node=node,
ecdsa_key_name=ecdsa_key_name,
- schains_on_node=schains_on_node,
schain_nodes_with_schains=schain_nodes_with_schains,
rotation_id=rotation_id,
node_groups=node_groups,
- skale_manager_opts=skale_manager_opts
+ skale_manager_opts=skale_manager_opts,
+ schain_base_port=schain_base_port,
+ common_bls_public_keys=common_bls_public_keys,
+ sync_node=sync_node,
+ archive=archive,
+ catchup=catchup
)
+ accounts = {}
+ if is_static_accounts(schain['name']):
+ logger.info(f'Found static account for {schain["name"]}, going to use in config')
+ accounts = static_accounts(schain['name'])['accounts']
+ else:
+ logger.info('Static accounts not found, generating regular accounts section')
+ predeployed_accounts = generate_predeployed_accounts(
+ schain_name=schain['name'],
+ schain_type=schain_type,
+ schain_nodes=schain_nodes_with_schains,
+ on_chain_owner=on_chain_owner,
+ mainnet_owner=mainnet_owner,
+ originator_address=originator_address,
+ generation=generation
+ )
+ precompiled_accounts = generate_precompiled_accounts(
+ on_chain_owner=on_chain_owner
+ )
+ accounts = {
+ **base_config.config['accounts'],
+ **predeployed_accounts,
+ **precompiled_accounts,
+ }
+
schain_config = SChainConfig(
seal_engine=base_config.config['sealEngine'],
params={
@@ -188,11 +213,7 @@ def generate_schain_config(
},
unddos=base_config.config['unddos'],
genesis=base_config.config['genesis'],
- accounts={
- **base_config.config['accounts'],
- **predeployed_accounts,
- **precompiled_accounts,
- },
+ accounts=accounts,
skale_config=skale_config
)
return schain_config
@@ -202,30 +223,47 @@ def generate_schain_config_with_skale(
skale: Skale,
schain_name: str,
generation: int,
- node_id: int,
+ node_config: NodeConfig,
rotation_data: dict,
- ecdsa_key_name: str
+ ecdsa_key_name: str,
+ sync_node: bool = False,
+ node_options: NodeOptions = NodeOptions()
) -> SChainConfig:
schain_nodes_with_schains = get_schain_nodes_with_schains(skale, schain_name)
- schains_on_node = skale.schains.get_schains_for_node(node_id)
+ schains_on_node = skale.schains.get_schains_for_node(node_config.id)
schain = skale.schains.get_by_name(schain_name)
- node = skale.nodes.get(node_id)
+ node = skale.nodes.get(node_config.id)
node_groups = get_previous_schain_groups(skale, schain_name)
is_owner_contract = is_address_contract(skale.web3, schain['mainnetOwner'])
skale_manager_opts = init_skale_manager_opts(skale)
+ group_index = skale.schains.name_to_id(schain_name)
+ common_bls_public_keys = get_common_bls_public_key(skale, group_index)
+
+ if sync_node:
+ schain_base_port = node_config.schain_base_port
+ else:
+ schain_base_port = get_schain_base_port_on_node(
+ schains_on_node,
+ schain['name'],
+ node['port']
+ )
return generate_schain_config(
schain=schain,
node=node,
- node_id=node_id,
+ node_id=node_config.id,
ecdsa_key_name=ecdsa_key_name,
- schains_on_node=schains_on_node,
rotation_id=rotation_data['rotation_id'],
schain_nodes_with_schains=schain_nodes_with_schains,
node_groups=node_groups,
generation=generation,
is_owner_contract=is_owner_contract,
- skale_manager_opts=skale_manager_opts
+ skale_manager_opts=skale_manager_opts,
+ schain_base_port=schain_base_port,
+ common_bls_public_keys=common_bls_public_keys,
+ sync_node=sync_node,
+ archive=node_options.archive,
+ catchup=node_options.catchup
)
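
Usage sketch for generating a sync-node config (hedged: the `skale` instance is assumed to be initialized elsewhere, and `node_config.schain_base_port` must be set for sync nodes):

from core.node_config import NodeConfig
from core.schains.config.generator import generate_schain_config_with_skale
from tools.node_options import NodeOptions

schain_config = generate_schain_config_with_skale(
    skale=skale,                       # an initialized skale.Skale instance
    schain_name='my-chain',
    generation=1,
    node_config=NodeConfig(),
    rotation_data={'rotation_id': 0},
    ecdsa_key_name='',                 # ignored for sync nodes
    sync_node=True,
    node_options=NodeOptions(),
)
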
diff --git a/core/schains/config/helper.py b/core/schains/config/helper.py
index 384f8aae1..e6963f444 100644
--- a/core/schains/config/helper.py
+++ b/core/schains/config/helper.py
@@ -24,6 +24,7 @@
from web3 import Web3
from core.schains.dkg.utils import get_secret_key_share_filepath
+
from tools.helper import read_json
from tools.configs import STATIC_PARAMS_FILEPATH, ENV_TYPE
from tools.helper import safe_load_yml
diff --git a/core/schains/config/main.py b/core/schains/config/main.py
index 0dc962d57..4dd1a1838 100644
--- a/core/schains/config/main.py
+++ b/core/schains/config/main.py
@@ -23,13 +23,16 @@
from skale import Skale
from core.node import get_skale_node_version
+from core.node_config import NodeConfig
from core.schains.config.directory import get_files_with_prefix, schain_config_dir
from core.schains.config.file_manager import ConfigFileManager, SkaledConfigFilename
from core.schains.config.generator import generate_schain_config_with_skale
from tools.configs import SCHAIN_CONFIG_DIR_SKALED
+from tools.str_formatters import arguments_list_string
+from tools.node_options import NodeOptions
-from web.models.schain import upsert_schain_record, SChainRecord
+from web.models.schain import upsert_schain_record
logger = logging.getLogger(__name__)
@@ -37,24 +40,29 @@
def create_new_upstream_config(
skale: Skale,
- node_id: int,
+ node_config: NodeConfig,
schain_name: str,
generation: int,
ecdsa_sgx_key_name: str,
rotation_data: dict,
- stream_version: str,
- schain_record: SChainRecord,
- file_manager: ConfigFileManager
+ sync_node: bool,
+ node_options: NodeOptions
) -> Dict:
- logger.info('Generating sChain config for %s', schain_name)
+ logger.warning(arguments_list_string({
+ 'sChain name': schain_name,
+ 'generation': generation,
+ 'sync_node': sync_node
+ }, 'Generating sChain config'))
schain_config = generate_schain_config_with_skale(
skale=skale,
schain_name=schain_name,
generation=generation,
- node_id=node_id,
+ node_config=node_config,
rotation_data=rotation_data,
- ecdsa_key_name=ecdsa_sgx_key_name
+ ecdsa_key_name=ecdsa_sgx_key_name,
+ sync_node=sync_node,
+ node_options=node_options
)
return schain_config.to_dict()
diff --git a/core/schains/config/node_info.py b/core/schains/config/node_info.py
index 1687c1f5a..e4bb3386b 100644
--- a/core/schains/config/node_info.py
+++ b/core/schains/config/node_info.py
@@ -17,11 +17,11 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
+import logging
from dataclasses import dataclass
from skale.dataclasses.node_info import NodeInfo
from skale.dataclasses.skaled_ports import SkaledPorts
-from skale.schain_config.ports_allocation import get_schain_base_port_on_node
from core.schains.config.skale_manager_opts import SkaleManagerOpts
from tools.configs import SGX_SSL_KEY_FILEPATH, SGX_SSL_CERT_FILEPATH
@@ -31,6 +31,9 @@
from tools.helper import read_json
+logger = logging.getLogger(__name__)
+
+
@dataclass
class CurrentNodeInfo(NodeInfo):
"""Dataclass that represents nodeInfo key of the skaleConfig section"""
@@ -42,9 +45,13 @@ class CurrentNodeInfo(NodeInfo):
skale_manager_opts: SkaleManagerOpts
static_node_info: dict
+ sync_node: bool
+ archive: bool
+ catchup: bool
+
def to_dict(self):
"""Returns camel-case representation of the CurrentNodeInfo object"""
- return {
+ node_info = {
**super().to_dict(),
**{
'imaMessageProxySChain': self.ima_message_proxy_schain,
@@ -53,55 +60,86 @@ def to_dict(self):
'wallets': self.wallets,
'imaMonitoringPort': self.base_port + SkaledPorts.IMA_MONITORING.value,
'skale-manager': self.skale_manager_opts.to_dict(),
+ 'syncNode': self.sync_node,
'info-acceptors': 1,
**self.static_node_info
}
}
+ if self.sync_node:
+ node_info['archiveMode'] = self.archive
+ node_info['syncFromCatchup'] = self.catchup
+ return node_info
def generate_current_node_info(
node: dict, node_id: int, ecdsa_key_name: str, static_node_info: dict,
- schain: dict, schains_on_node: list, rotation_id: int, skale_manager_opts: SkaleManagerOpts
+ schain: dict, rotation_id: int,
+ nodes_in_schain: int,
+ skale_manager_opts: SkaleManagerOpts,
+ schain_base_port: int,
+ common_bls_public_keys: list[str],
+ sync_node: bool = False, archive: bool = False, catchup: bool = False
) -> CurrentNodeInfo:
- schain_base_port_on_node = get_schain_base_port_on_node(
- schains_on_node,
+ wallets = generate_wallets_config(
schain['name'],
- node['port']
+ rotation_id,
+ sync_node,
+ nodes_in_schain,
+ common_bls_public_keys
)
+
+ if ecdsa_key_name is None:
+ ecdsa_key_name = ''
+
return CurrentNodeInfo(
node_id=node_id,
name=node['name'],
- base_port=schain_base_port_on_node,
+ base_port=schain_base_port,
ecdsa_key_name=ecdsa_key_name,
- wallets=generate_wallets_config(schain['name'], rotation_id),
+ wallets=wallets,
skale_manager_opts=skale_manager_opts,
+ sync_node=sync_node,
+ archive=archive,
+ catchup=catchup,
static_node_info=static_node_info,
**get_message_proxy_addresses()
)
-def generate_wallets_config(schain_name: str, rotation_id: int) -> dict:
- secret_key_share_filepath = get_secret_key_share_filepath(schain_name, rotation_id)
- secret_key_share_config = read_json(secret_key_share_filepath)
+def generate_wallets_config(
+ schain_name: str,
+ rotation_id: int,
+ sync_node: bool,
+ nodes_in_schain: int,
+    common_bls_public_keys: list[str]
+) -> dict:
+ wallets = {'ima': {}}
+ formatted_common_pk = {}
+
+ for (i, value) in enumerate(common_bls_public_keys):
+ name = 'commonBLSPublicKey' + str(i)
+ formatted_common_pk[name] = str(value)
+
+ wallets['ima'].update({
+ 'n': nodes_in_schain,
+ **formatted_common_pk
+ })
+
+ if not sync_node:
+ secret_key_share_filepath = get_secret_key_share_filepath(schain_name, rotation_id)
+ secret_key_share_config = read_json(secret_key_share_filepath)
- wallets = {
- 'ima': {
+ wallets['ima'].update({
'keyShareName': secret_key_share_config['key_share_name'],
't': secret_key_share_config['t'],
- 'n': secret_key_share_config['n'],
'certFile': SGX_SSL_CERT_FILEPATH,
- 'keyFile': SGX_SSL_KEY_FILEPATH
- }
- }
- common_public_keys = secret_key_share_config['common_public_key']
- for (i, value) in enumerate(common_public_keys):
- name = 'commonBLSPublicKey' + str(i)
- wallets['ima'][name] = str(value)
+ 'keyFile': SGX_SSL_KEY_FILEPATH,
+ })
- public_keys = secret_key_share_config['public_key']
- for (i, value) in enumerate(public_keys):
- name = 'BLSPublicKey' + str(i)
- wallets['ima'][name] = str(value)
+ public_keys = secret_key_share_config['public_key']
+ for (i, value) in enumerate(public_keys):
+ name = 'BLSPublicKey' + str(i)
+ wallets['ima'][name] = str(value)
return wallets
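
Shape sketch of the resulting wallets section (placeholder values): a sync node carries only `n` and the common BLS public key, since it holds no secret key share:

sync_node_wallets = {
    'ima': {
        'n': 4,
        'commonBLSPublicKey0': '<coordinate>',  # one entry per key coordinate
        # no keyShareName / t / certFile / keyFile / BLSPublicKey* entries
    }
}
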
diff --git a/core/schains/config/schain_info.py b/core/schains/config/schain_info.py
index 738d3cc52..d6a5bab83 100644
--- a/core/schains/config/schain_info.py
+++ b/core/schains/config/schain_info.py
@@ -22,6 +22,8 @@
from core.schains.limits import get_schain_limit, get_schain_type
from core.schains.types import MetricType
+from tools.configs.schains import MAX_CONSENSUS_STORAGE_INF_VALUE
+
@dataclass
class SChainInfo:
@@ -69,10 +71,14 @@ def generate_schain_info(
on_chain_etherbase: str,
static_schain_info: dict,
node_groups: dict,
- nodes: dict
+ nodes: dict,
+ sync_node: bool,
+ archive: bool
) -> SChainInfo:
schain_type = get_schain_type(schain['partOfNode'])
volume_limits = get_schain_limit(schain_type, MetricType.volume_limits)
+ if sync_node and archive:
+ volume_limits['max_consensus_storage_bytes'] = MAX_CONSENSUS_STORAGE_INF_VALUE
leveldb_limits = get_schain_limit(schain_type, MetricType.leveldb_limits)
contract_storage_limit = leveldb_limits['contract_storage']
db_storage_limit = leveldb_limits['db_storage']
diff --git a/core/schains/config/schain_node.py b/core/schains/config/schain_node.py
index 5530c0df0..0a3346f37 100644
--- a/core/schains/config/schain_node.py
+++ b/core/schains/config/schain_node.py
@@ -54,9 +54,19 @@ def to_dict(self):
}
-def generate_schain_nodes(schain_nodes_with_schains: list, schain_name, rotation_id: int):
+def generate_schain_nodes(
+ schain_nodes_with_schains: list,
+ schain_name: str,
+ rotation_id: int,
+ sync_node: bool = False
+):
schain_nodes = []
- bls_public_keys = get_bls_public_keys(schain_name, rotation_id)
+
+ if sync_node:
+ bls_public_keys = ["0:0:1:0"] * len(schain_nodes_with_schains)
+ else:
+ bls_public_keys = get_bls_public_keys(schain_name, rotation_id)
+
for i, node in enumerate(schain_nodes_with_schains, 1):
base_port = get_schain_base_port_on_node(node['schains'], schain_name, node['port'])
node_info = SChainNodeInfo(
diff --git a/core/schains/config/skale_section.py b/core/schains/config/skale_section.py
index 4fd53e0eb..340644be1 100644
--- a/core/schains/config/skale_section.py
+++ b/core/schains/config/skale_section.py
@@ -48,8 +48,10 @@ def to_dict(self):
def generate_skale_section(
schain: dict, on_chain_etherbase: str, on_chain_owner: str, schain_id: int, node_id: int,
- node: dict, ecdsa_key_name: str, schains_on_node: list, schain_nodes_with_schains: list,
- rotation_id: int, node_groups: dict, skale_manager_opts: SkaleManagerOpts
+ node: dict, ecdsa_key_name: str, schain_nodes_with_schains: list,
+ rotation_id: int, node_groups: dict, skale_manager_opts: SkaleManagerOpts,
+ schain_base_port: int,
+ common_bls_public_keys: list[str], sync_node: bool = False, archive=None, catchup=None
) -> SkaleConfig:
contract_settings = generate_contract_settings(
on_chain_owner=on_chain_owner,
@@ -58,7 +60,8 @@ def generate_skale_section(
schain_type = get_schain_type(schain['partOfNode'])
static_node_info = get_static_node_info(schain_type)
- static_schain_info = get_static_schain_info()
+ static_schain_info = get_static_schain_info(schain['name'])
+ nodes_in_schain = len(schain_nodes_with_schains)
node_info = generate_current_node_info(
node_id=node_id,
@@ -66,15 +69,21 @@ def generate_skale_section(
ecdsa_key_name=ecdsa_key_name,
static_node_info=static_node_info,
schain=schain,
- schains_on_node=schains_on_node,
rotation_id=rotation_id,
- skale_manager_opts=skale_manager_opts
+ skale_manager_opts=skale_manager_opts,
+ schain_base_port=schain_base_port,
+ nodes_in_schain=nodes_in_schain,
+ common_bls_public_keys=common_bls_public_keys,
+ sync_node=sync_node,
+ archive=archive,
+ catchup=catchup
)
schain_nodes = generate_schain_nodes(
schain_nodes_with_schains=schain_nodes_with_schains,
schain_name=schain['name'],
- rotation_id=rotation_id
+ rotation_id=rotation_id,
+ sync_node=sync_node
)
schain_info = generate_schain_info(
@@ -83,7 +92,9 @@ def generate_skale_section(
on_chain_etherbase=on_chain_etherbase,
static_schain_info=static_schain_info,
nodes=schain_nodes,
- node_groups=node_groups
+ node_groups=node_groups,
+ sync_node=sync_node,
+ archive=archive
)
return SkaleConfig(
diff --git a/core/schains/monitor/post_rotation_monitor.py b/core/schains/config/static_accounts.py
similarity index 53%
rename from core/schains/monitor/post_rotation_monitor.py
rename to core/schains/config/static_accounts.py
index 8200ab5a9..989da99f8 100644
--- a/core/schains/monitor/post_rotation_monitor.py
+++ b/core/schains/config/static_accounts.py
@@ -2,7 +2,7 @@
#
# This file is part of SKALE Admin
#
-# Copyright (C) 2021 SKALE Labs
+# Copyright (C) 2023-Present SKALE Labs
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
@@ -17,22 +17,22 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
-import logging
+import os
-from core.schains.monitor.base_monitor import BaseMonitor
+from tools.helper import read_json
+from tools.configs import STATIC_ACCOUNTS_FOLDER, ENV_TYPE
-logger = logging.getLogger(__name__)
+def static_accounts(schain_name: str) -> dict:
+ return read_json(static_accounts_filepath(schain_name))
-class PostRotationMonitor(BaseMonitor):
- """
- PostRotationMonitor be executed for the sChain on the staying node when rotation is complete.
- This type of monitor reloads skaled container.
- """
- @BaseMonitor.monitor_runner
- def run(self):
- logger.info(f'{self.p} was stopped after rotation. Going to restart')
- self.config(overwrite=True)
- self.firewall_rules()
- self.recreated_schain_containers()
+def is_static_accounts(schain_name: str) -> bool:
+ return os.path.isfile(static_accounts_filepath(schain_name))
+
+
+def static_accounts_filepath(schain_name: str) -> str:
+ static_accounts_env_path = os.path.join(STATIC_ACCOUNTS_FOLDER, ENV_TYPE)
+ if not os.path.isdir(static_accounts_env_path):
+ return ''
+ return os.path.join(static_accounts_env_path, f'schain-{schain_name}.json')
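
Usage sketch: static accounts are looked up per env type at STATIC_ACCOUNTS_FOLDER/<ENV_TYPE>/schain-<name>.json, and the generator above falls back to generated accounts when no such file exists:

from core.schains.config.static_accounts import is_static_accounts, static_accounts

if is_static_accounts('my-chain'):
    accounts = static_accounts('my-chain')['accounts']
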
diff --git a/core/schains/config/static_params.py b/core/schains/config/static_params.py
index 05bff696d..83d140c12 100644
--- a/core/schains/config/static_params.py
+++ b/core/schains/config/static_params.py
@@ -27,9 +27,22 @@ def get_static_schain_cmd(env_type: str = ENV_TYPE) -> list:
return static_params['schain_cmd']
-def get_static_schain_info(env_type: str = ENV_TYPE) -> dict:
+def get_static_schain_info(schain_name: str, env_type: str = ENV_TYPE) -> dict | None:
static_params = get_static_params(env_type)
- return static_params['schain']
+ static_params_schain = static_params['schain']
+ processed_params = {}
+ for param_name, param in static_params_schain.items():
+ processed_params[param_name] = get_schain_static_param(param, schain_name)
+ return processed_params
+
+
+def get_schain_static_param(static_param_schain: dict | int, schain_name: str) -> int | None:
+ if isinstance(static_param_schain, int):
+ return static_param_schain
+ elif isinstance(static_param_schain, dict) and schain_name in static_param_schain:
+ return static_param_schain[schain_name]
+ else:
+ return static_param_schain.get('default', None)
def get_static_node_info(schain_type: SchainType, env_type: str = ENV_TYPE) -> dict:
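
Resolution sketch for the new per-chain overrides: a plain int applies to every chain, while a dict allows per-chain values with a 'default' fallback:

from core.schains.config.static_params import get_schain_static_param

assert get_schain_static_param(100, 'my-chain') == 100
assert get_schain_static_param({'my-chain': 5, 'default': 1}, 'my-chain') == 5
assert get_schain_static_param({'default': 1}, 'other-chain') == 1
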
diff --git a/core/schains/dkg/client.py b/core/schains/dkg/client.py
index 30454ea4b..bf8c0db3a 100644
--- a/core/schains/dkg/client.py
+++ b/core/schains/dkg/client.py
@@ -29,7 +29,9 @@
from core.schains.dkg.broadcast_filter import Filter
from core.schains.dkg.structures import ComplaintReason, DKGStep
+from tools.helper import no_hyphens
from tools.configs import NODE_DATA_PATH, SGX_CERTIFICATES_FOLDER
+from tools.resources import get_statsd_client
from tools.sgx_utils import sgx_unreachable_retry
sys.path.insert(0, NODE_DATA_PATH)
@@ -77,7 +79,7 @@ def convert_g2_point_to_hex(data):
data_hexed = ''
for coord in data:
temp = hex(int(coord))[2:]
- while (len(temp) < 64):
+ while len(temp) < 64:
temp = '0' + temp
data_hexed += temp
return data_hexed
@@ -87,7 +89,7 @@ def convert_hex_to_g2_array(data):
g2_array = []
while len(data) > 0:
cur = data[:256]
- g2_array.append([str(x) for x in [int(cur[64 * i:64 * i + 64], 16) for i in range(4)]])
+ g2_array.append([str(x) for x in [int(cur[64 * i: 64 * i + 64], 16) for i in range(4)]])
data = data[256:]
return g2_array
@@ -102,7 +104,7 @@ def convert_str_to_key_share(sent_secret_key_contribution, n):
def convert_key_share_to_str(data, n):
- return "".join(to_verify(s) for s in [data[i * 192:(i + 1) * 192] for i in range(n)])
+ return ''.join(to_verify(s) for s in [data[i * 192: (i + 1) * 192] for i in range(n)])
def to_verify(share):
@@ -111,24 +113,24 @@ def to_verify(share):
def generate_poly_name(group_index_str, node_id, dkg_id):
return (
- "POLY:SCHAIN_ID:"
- f"{group_index_str}"
- ":NODE_ID:"
- f"{str(node_id)}"
- ":DKG_ID:"
- f"{str(dkg_id)}"
- )
+ 'POLY:SCHAIN_ID:'
+ f'{group_index_str}'
+ ':NODE_ID:'
+ f'{str(node_id)}'
+ ':DKG_ID:'
+ f'{str(dkg_id)}'
+ )
def generate_bls_key_name(group_index_str, node_id, dkg_id):
return (
- "BLS_KEY:SCHAIN_ID:"
- f"{group_index_str}"
- ":NODE_ID:"
- f"{str(node_id)}"
- ":DKG_ID:"
- f"{str(dkg_id)}"
- )
+ 'BLS_KEY:SCHAIN_ID:'
+ f'{group_index_str}'
+ ':NODE_ID:'
+ f'{str(node_id)}'
+ ':DKG_ID:'
+ f'{str(dkg_id)}'
+ )
class DKGClient:
@@ -145,10 +147,11 @@ def __init__(
node_ids_contract,
eth_key_name,
rotation_id,
- step: DKGStep = DKGStep.NONE
+ step: DKGStep = DKGStep.NONE,
):
- self.sgx = SgxClient(os.environ['SGX_SERVER_URL'], n=n, t=t,
- path_to_cert=SGX_CERTIFICATES_FOLDER)
+ self.sgx = SgxClient(
+ os.environ['SGX_SERVER_URL'], n=n, t=t, path_to_cert=SGX_CERTIFICATES_FOLDER
+ )
self.schain_name = schain_name
self.group_index = skale.schains.name_to_group_id(schain_name)
self.node_id_contract = node_id_contract
@@ -168,12 +171,24 @@ def __init__(
self.node_ids_contract = node_ids_contract
self.dkg_contract_functions = self.skale.dkg.contract.functions
self.dkg_timeout = self.skale.constants_holder.get_dkg_timeout()
- self.complaint_error_event_hash = self.skale.web3.to_hex(self.skale.web3.keccak(
- text="ComplaintError(string)"
- ))
- self.last_completed_step = step # last step
+ self.complaint_error_event_hash = self.skale.web3.to_hex(
+ self.skale.web3.keccak(text='ComplaintError(string)')
+ )
+ self.statsd_client = get_statsd_client()
+ self._last_completed_step = step # last step
logger.info(f'sChain: {self.schain_name}. DKG timeout is {self.dkg_timeout}')
+ @property
+ def last_completed_step(self) -> DKGStep:
+ return self._last_completed_step
+
+ @last_completed_step.setter
+ def last_completed_step(self, step: DKGStep):
+ self.statsd_client.gauge(
+ f'admin.schains.dkg.last_completed_step.{no_hyphens(self.schain_name)}', step.value
+ )
+ self._last_completed_step = step
+
def is_channel_opened(self):
return self.skale.dkg.is_channel_opened(self.group_index)
@@ -203,9 +218,9 @@ def verification_vector(self):
@sgx_unreachable_retry
def secret_key_contribution(self):
- self.sent_secret_key_contribution = self.sgx.get_secret_key_contribution_v2(self.poly_name,
- self.public_keys
- )
+ self.sent_secret_key_contribution = self.sgx.get_secret_key_contribution_v2(
+ self.poly_name, self.public_keys
+ )
self.incoming_secret_key_contribution[self.node_id_dkg] = self.sent_secret_key_contribution[
self.node_id_dkg * 192: (self.node_id_dkg + 1) * 192
]
@@ -222,12 +237,14 @@ def broadcast(self):
)
is_broadcast_possible = self.skale.dkg.contract.functions.isBroadcastPossible(
- self.group_index, self.node_id_contract).call({'from': self.skale.wallet.address})
+ self.group_index, self.node_id_contract
+ ).call({'from': self.skale.wallet.address})
channel_opened = self.is_channel_opened()
if not is_broadcast_possible or not channel_opened:
- logger.info(f'sChain: {self.schain_name}. '
- f'{self.node_id_dkg} node could not sent broadcast')
+ logger.info(
+                f'sChain: {self.schain_name}. ' f'{self.node_id_dkg} node could not send broadcast'
+ )
return
verification_vector = self.verification_vector()
@@ -238,7 +255,7 @@ def broadcast(self):
self.node_id_contract,
verification_vector,
secret_key_contribution,
- self.rotation_id
+ self.rotation_id,
)
self.last_completed_step = DKGStep.BROADCAST
logger.info('Everything is sent from %d node', self.node_id_dkg)
@@ -251,28 +268,31 @@ def receive_from_node(self, from_node, broadcasted_data):
try:
if not self.verification(from_node):
raise DkgVerificationError(
- f"sChain: {self.schain_name}. "
- f"Fatal error : user {str(from_node + 1)} "
+ f'sChain: {self.schain_name}. '
+ f'Fatal error : user {str(from_node + 1)} '
f"hasn't passed verification by user {str(self.node_id_dkg + 1)}"
)
- logger.info(f'sChain: {self.schain_name}. '
- f'All data from {from_node} was received and verified')
+ logger.info(
+ f'sChain: {self.schain_name}. '
+ f'All data from {from_node} was received and verified'
+ )
except SgxUnreachableError as e:
raise SgxUnreachableError(
- f"sChain: {self.schain_name}. "
- f"Fatal error : user {str(from_node + 1)} "
- f"hasn't passed verification by user {str(self.node_id_dkg + 1)}"
- f"with SgxUnreachableError: ", e
- )
+ f'sChain: {self.schain_name}. '
+ f'Fatal error : user {str(from_node + 1)} '
+ f"hasn't passed verification by user {str(self.node_id_dkg + 1)}"
+            f' with SgxUnreachableError: ',
+ e,
+ )
@sgx_unreachable_retry
def verification(self, from_node):
- return self.sgx.verify_secret_share_v2(self.incoming_verification_vector[from_node],
- self.eth_key_name,
- to_verify(
- self.incoming_secret_key_contribution[from_node]
- ),
- self.node_id_dkg)
+ return self.sgx.verify_secret_share_v2(
+ self.incoming_verification_vector[from_node],
+ self.eth_key_name,
+ to_verify(self.incoming_secret_key_contribution[from_node]),
+ self.node_id_dkg,
+ )
@sgx_unreachable_retry
def is_bls_key_generated(self):
@@ -287,17 +307,20 @@ def is_bls_key_generated(self):
@sgx_unreachable_retry
def generate_bls_key(self):
- received_secret_key_contribution = "".join(to_verify(
- self.incoming_secret_key_contribution[j]
- )
- for j in range(self.sgx.n))
- logger.info(f'sChain: {self.schain_name}. '
- f'DKGClient is going to create BLS private key with name {self.bls_name}')
- bls_private_key = self.sgx.create_bls_private_key_v2(self.poly_name, self.bls_name,
- self.eth_key_name,
- received_secret_key_contribution)
- logger.info(f'sChain: {self.schain_name}. '
- 'DKGClient is going to fetch BLS public key with name {self.bls_name}')
+ received_secret_key_contribution = ''.join(
+ to_verify(self.incoming_secret_key_contribution[j]) for j in range(self.sgx.n)
+ )
+ logger.info(
+ f'sChain: {self.schain_name}. '
+ f'DKGClient is going to create BLS private key with name {self.bls_name}'
+ )
+ bls_private_key = self.sgx.create_bls_private_key_v2(
+ self.poly_name, self.bls_name, self.eth_key_name, received_secret_key_contribution
+ )
+ logger.info(
+ f'sChain: {self.schain_name}. '
+ f'DKGClient is going to fetch BLS public key with name {self.bls_name}'
+ )
self.public_key = self.sgx.get_bls_public_key(self.bls_name)
return bls_private_key
@@ -315,72 +338,70 @@ def get_bls_public_keys(self):
def alright(self):
logger.info(f'sChain {self.schain_name} sending alright transaction')
is_alright_possible = self.skale.dkg.is_alright_possible(
- self.group_index, self.node_id_contract, self.skale.wallet.address)
+ self.group_index, self.node_id_contract, self.skale.wallet.address
+ )
if not is_alright_possible or not self.is_channel_opened():
- logger.info(f'sChain: {self.schain_name}. '
- f'{self.node_id_dkg} node could not sent an alright note')
+ logger.info(
+ f'sChain: {self.schain_name}. '
+ f'{self.node_id_dkg} node could not send an alright note'
+ )
return
self.skale.dkg.alright(
- self.group_index,
- self.node_id_contract,
- gas_limit=ALRIGHT_GAS_LIMIT,
- multiplier=2
+ self.group_index, self.node_id_contract, gas_limit=ALRIGHT_GAS_LIMIT, multiplier=2
)
self.last_completed_step = DKGStep.ALRIGHT
logger.info(f'sChain: {self.schain_name}. {self.node_id_dkg} node sent an alright note')
def send_complaint(self, to_node: int, reason: ComplaintReason):
- logger.info(f'sChain: {self.schain_name}. '
- f'{self.node_id_dkg} node is trying to sent a {reason} on {to_node} node')
+ logger.info(
+ f'sChain: {self.schain_name}. '
+ f'{self.node_id_dkg} node is trying to send a {reason} on {to_node} node'
+ )
is_complaint_possible = self.skale.dkg.is_complaint_possible(
- self.group_index, self.node_id_contract, self.node_ids_dkg[to_node],
- self.skale.wallet.address
+ self.group_index,
+ self.node_id_contract,
+ self.node_ids_dkg[to_node],
+ self.skale.wallet.address,
)
is_channel_opened = self.is_channel_opened()
logger.info(
- 'Complaint possible %s, channel opened %s',
- is_complaint_possible,
- is_channel_opened
+ 'Complaint possible %s, channel opened %s', is_complaint_possible, is_channel_opened
)
if not is_complaint_possible or not is_channel_opened:
- logger.info(
- '%d node could not sent a complaint on %d node',
- self.node_id_dkg,
- to_node
- )
+ logger.info('%d node could not send a complaint on %d node', self.node_id_dkg, to_node)
return False
reason_to_step = {
ComplaintReason.NO_BROADCAST: DKGStep.COMPLAINT_NO_BROADCAST,
ComplaintReason.BAD_DATA: DKGStep.COMPLAINT_BAD_DATA,
ComplaintReason.NO_ALRIGHT: DKGStep.COMPLAINT_NO_ALRIGHT,
- ComplaintReason.NO_RESPONSE: DKGStep.COMPLAINT_NO_RESPONSE
+ ComplaintReason.NO_RESPONSE: DKGStep.COMPLAINT_NO_RESPONSE,
}
try:
if reason == ComplaintReason.BAD_DATA:
tx_res = self.skale.dkg.complaint_bad_data(
- self.group_index,
- self.node_id_contract,
- self.node_ids_dkg[to_node]
+ self.group_index, self.node_id_contract, self.node_ids_dkg[to_node]
)
else:
tx_res = self.skale.dkg.complaint(
- self.group_index,
- self.node_id_contract,
- self.node_ids_dkg[to_node]
+ self.group_index, self.node_id_contract, self.node_ids_dkg[to_node]
)
if self.check_complaint_logs(tx_res.receipt['logs'][0]):
- logger.info(f'sChain: {self.schain_name}. '
- f'{self.node_id_dkg} node sent a complaint on {to_node} node')
+ logger.info(
+ f'sChain: {self.schain_name}. '
+ f'{self.node_id_dkg} node sent a complaint on {to_node} node'
+ )
self.last_completed_step = reason_to_step[reason]
return True
else:
- logger.info(f'sChain: {self.schain_name}. Complaint from {self.node_id_dkg} on '
- f'{to_node} node was rejected')
+ logger.info(
+ f'sChain: {self.schain_name}. Complaint from {self.node_id_dkg} on '
+ f'{to_node} node was rejected'
+ )
return False
except TransactionFailedError as e:
logger.error(f'DKG complaint failed: sChain {self.schain_name}')
@@ -389,8 +410,7 @@ def send_complaint(self, to_node: int, reason: ComplaintReason):
@sgx_unreachable_retry
def get_complaint_response(self, to_node_index):
response = self.sgx.complaint_response(
- self.poly_name,
- self.node_ids_contract[to_node_index]
+ self.poly_name, self.node_ids_contract[to_node_index]
)
share, dh_key = response.share, response.dh_key
verification_vector_mult = response.verification_vector_mult
@@ -402,11 +422,13 @@ def get_complaint_response(self, to_node_index):
def response(self, to_node_index):
is_pre_response_possible = self.skale.dkg.is_pre_response_possible(
- self.group_index, self.node_id_contract, self.skale.wallet.address)
+ self.group_index, self.node_id_contract, self.skale.wallet.address
+ )
if not is_pre_response_possible or not self.is_channel_opened():
- logger.info(f'sChain: {self.schain_name}. '
- f'{self.node_id_dkg} node could not sent a response')
+ logger.info(
+ f'sChain: {self.schain_name}. {self.node_id_dkg} node could not send a response'
+ )
return
share, dh_key, verification_vector_mult = self.get_complaint_response(to_node_index)
@@ -417,24 +439,22 @@ def response(self, to_node_index):
self.node_id_contract,
convert_g2_points_to_array(self.incoming_verification_vector[self.node_id_dkg]),
convert_g2_points_to_array(verification_vector_mult),
- convert_str_to_key_share(self.sent_secret_key_contribution, self.n)
+ convert_str_to_key_share(self.sent_secret_key_contribution, self.n),
)
self.last_completed_step = DKGStep.PRE_RESPONSE
is_response_possible = self.skale.dkg.is_response_possible(
- self.group_index, self.node_id_contract, self.skale.wallet.address)
+ self.group_index, self.node_id_contract, self.skale.wallet.address
+ )
if not is_response_possible or not self.is_channel_opened():
- logger.info(f'sChain: {self.schain_name}. '
- f'{self.node_id_dkg} node could not sent a response')
+ logger.info(
+ f'sChain: {self.schain_name}. '
+ f'{self.node_id_dkg} node could not send a response'
+ )
return
- self.skale.dkg.response(
- self.group_index,
- self.node_id_contract,
- int(dh_key, 16),
- share
- )
+ self.skale.dkg.response(self.group_index, self.node_id_contract, int(dh_key, 16), share)
self.last_completed_step = DKGStep.RESPONSE
logger.info(f'sChain: {self.schain_name}. {self.node_id_dkg} node sent a response')
except TransactionFailedError as e:
@@ -450,8 +470,7 @@ def fetch_all_broadcasted_data(self):
broadcasted_data = [event.verificationVector, event.secretKeyContribution]
self.store_broadcasted_data(broadcasted_data, from_node)
logger.info(
- f'sChain: {self.schain_name}. Received by {self.node_id_dkg} from '
- f'{from_node}'
+ f'sChain: {self.schain_name}. Received by {self.node_id_dkg} from {from_node}'
)
def is_all_data_received(self, from_node):
diff --git a/core/schains/dkg/utils.py b/core/schains/dkg/utils.py
index 92ec9c99c..585c36ce7 100644
--- a/core/schains/dkg/utils.py
+++ b/core/schains/dkg/utils.py
@@ -88,6 +88,37 @@ def init_dkg_client(node_id, schain_name, skale, sgx_eth_key_name, rotation_id):
return dkg_client
+def sync_broadcast_data(dkg_client, dkg_filter, is_received, is_correct, broadcasts_found):
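+ # Scan broadcast events, mark each sender as received and verify
+ # its contribution, accumulating results into the passed-in state lists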
+ if dkg_client.is_everyone_broadcasted():
+ events = dkg_filter.get_events(from_channel_started_block=True)
+ else:
+ events = dkg_filter.get_events()
+ for event in events:
+ from_node = dkg_client.node_ids_contract[event.nodeIndex]
+ if is_received[from_node] and from_node != dkg_client.node_id_dkg:
+ continue
+ is_received[from_node] = True
+ broadcasted_data = [event.verificationVector, event.secretKeyContribution]
+ if from_node != dkg_client.node_id_dkg:
+ logger.info(f'sChain {dkg_client.schain_name}: receiving from node {from_node}')
+ try:
+ dkg_client.receive_from_node(from_node, broadcasted_data)
+ is_correct[from_node] = True
+ broadcasts_found.append(event.nodeIndex)
+ except DkgVerificationError as e:
+ logger.error(e)
+ continue
+ logger.info(
+ f'sChain: {dkg_client.schain_name}. Received by {dkg_client.node_id_dkg} from '
+ f'{from_node}'
+ )
+ logger.info(f'sChain {dkg_client.schain_name}: total received {len(broadcasts_found)} '
+ f'broadcasts from nodes {broadcasts_found}')
+ return (is_received, is_correct, broadcasts_found)
+
+
def receive_broadcast_data(dkg_client: DKGClient) -> BroadcastResult:
n = dkg_client.n
schain_name = dkg_client.schain_name
@@ -108,38 +139,15 @@ def receive_broadcast_data(dkg_client: DKGClient) -> BroadcastResult:
while False in is_received:
time_gone = get_latest_block_timestamp(dkg_client.skale) - start_time
+ time_left = max(dkg_client.dkg_timeout - time_gone, 0)
+ logger.info(f'sChain {schain_name}: trying to receive broadcasted data, '
+ f'{time_left} seconds left')
+ is_received, is_correct, broadcasts_found = sync_broadcast_data(dkg_client, dkg_filter,
+ is_received, is_correct,
+ broadcasts_found)
if time_gone > dkg_client.dkg_timeout:
break
- logger.info(f'sChain {schain_name}: trying to receive broadcasted data,'
- f'{dkg_client.dkg_timeout - time_gone} seconds left')
- if dkg_client.is_everyone_broadcasted():
- events = dkg_filter.get_events(from_channel_started_block=True)
- else:
- events = dkg_filter.get_events()
- for event in events:
- from_node = dkg_client.node_ids_contract[event.nodeIndex]
- if is_received[from_node] and from_node != dkg_client.node_id_dkg:
- continue
- else:
- is_received[from_node] = True
- broadcasted_data = [event.verificationVector, event.secretKeyContribution]
- is_received[from_node] = True
- if from_node != dkg_client.node_id_dkg:
- logger.info(f'sChain {schain_name}: receiving from node {from_node}')
- try:
- dkg_client.receive_from_node(from_node, broadcasted_data)
- is_correct[from_node] = True
- broadcasts_found.append(event.nodeIndex)
- except DkgVerificationError as e:
- logger.error(e)
- continue
- logger.info(
- f'sChain: {schain_name}. Received by {dkg_client.node_id_dkg} from '
- f'{from_node}'
- )
- logger.info(f'sChain {schain_name}: total received {len(broadcasts_found)} broadcasts'
- f' from nodes {broadcasts_found}')
sleep(BROADCAST_DATA_SEARCH_SLEEP)
return BroadcastResult(correct=is_correct, received=is_received)
@@ -169,17 +177,12 @@ def generate_bls_keys(dkg_client):
dkg_client.fetch_bls_public_key()
bls_public_keys = dkg_client.get_bls_public_keys()
- common_public_key = skale.key_storage.get_common_public_key(dkg_client.group_index)
- formated_common_public_key = [
- elem
- for coord in common_public_key
- for elem in coord
- ]
+ common_public_key = get_common_bls_public_key(skale, dkg_client.group_index)
except Exception as err:
raise DKGKeyGenerationError(err)
dkg_client.last_completed_step = DKGStep.KEY_GENERATION
return {
- 'common_public_key': formated_common_public_key,
+ 'common_public_key': common_public_key,
'public_key': dkg_client.public_key,
'bls_public_keys': bls_public_keys,
't': dkg_client.t,
@@ -188,6 +191,15 @@ def generate_bls_keys(dkg_client):
}
+def get_common_bls_public_key(skale, group_index: str) -> list[str]:
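+ # Flatten the G2 point coordinate pairs returned by the contract into a flat list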
+ raw_common_public_key = skale.key_storage.get_common_public_key(group_index)
+ return [
+ elem
+ for coord in raw_common_public_key
+ for elem in coord
+ ]
+
+
def send_complaint(dkg_client: DKGClient, index: int, reason: ComplaintReason):
channel_started_time = dkg_client.skale.dkg.get_channel_started_time(dkg_client.group_index)
reason_to_missing = {
diff --git a/core/schains/firewall/rule_controller.py b/core/schains/firewall/rule_controller.py
index 2fda28168..51e8920a8 100644
--- a/core/schains/firewall/rule_controller.py
+++ b/core/schains/firewall/rule_controller.py
@@ -87,7 +87,7 @@ def get_missing(self) -> Dict['str', Any]:
return missing
def is_configured(self) -> bool:
- return all((self.base_port, self.own_ip, self.node_ips))
+ return all((self.base_port, self.node_ips))
def configure(
self,
diff --git a/core/schains/ima.py b/core/schains/ima.py
index 8cace6162..ded3ce80a 100644
--- a/core/schains/ima.py
+++ b/core/schains/ima.py
@@ -28,21 +28,21 @@
from core.schains.config.directory import schain_config_dir
from core.schains.config.file_manager import ConfigFileManager
-from core.schains.config.helper import get_schain_ports_from_config, get_chain_id
+from core.schains.config.helper import get_chain_id, get_schain_ports_from_config, get_static_params
from core.ima.schain import get_schain_ima_abi_filepath
from tools.configs import ENV_TYPE, SGX_SSL_KEY_FILEPATH, SGX_SSL_CERT_FILEPATH, SGX_SERVER_URL
-from tools.configs.containers import CONTAINERS_INFO, IMA_MIGRATION_PATH
+from tools.configs.containers import IMA_MIGRATION_PATH, CONTAINERS_INFO
from tools.configs.db import REDIS_URI
from tools.configs.ima import (
- IMA_ENDPOINT,
MAINNET_IMA_ABI_FILEPATH,
IMA_STATE_CONTAINER_PATH,
- IMA_TIME_FRAMING,
- IMA_NETWORK_BROWSER_FILEPATH
+ IMA_NETWORK_BROWSER_FILEPATH,
+ DEFAULT_TIME_FRAME
)
from tools.configs.schains import SCHAINS_DIR_PATH
-from tools.configs.web3 import ABI_FILEPATH
from tools.helper import safe_load_yml
+from tools.configs.web3 import ABI_FILEPATH, ENDPOINT
+
logger = logging.getLogger(__name__)
@@ -145,7 +145,7 @@ def schain_index_to_node_number(node):
return int(node['schainIndex']) - 1
-def get_ima_env(schain_name: str, mainnet_chain_id: int) -> ImaEnv:
+def get_ima_env(schain_name: str, mainnet_chain_id: int, time_frame: int) -> ImaEnv:
schain_config = ConfigFileManager(schain_name).skaled_config
node_info = schain_config["skaleConfig"]["nodeInfo"]
bls_key_name = node_info['wallets']['ima']['keyShareName']
@@ -166,7 +166,7 @@ def get_ima_env(schain_name: str, mainnet_chain_id: int) -> ImaEnv:
state_file=IMA_STATE_CONTAINER_PATH,
schain_name=schain_name,
schain_rpc_url=get_localhost_http_endpoint(schain_name),
- mainnet_rpc_url=IMA_ENDPOINT,
+ mainnet_rpc_url=ENDPOINT,
node_number=schain_index,
nodes_count=len(schain_nodes['nodes']),
sgx_url=SGX_SERVER_URL,
@@ -180,7 +180,7 @@ def get_ima_env(schain_name: str, mainnet_chain_id: int) -> ImaEnv:
cid_schain=schain_chain_id,
monitoring_port=node_info['imaMonitoringPort'],
rpc_port=get_ima_rpc_port(schain_name),
- time_framing=IMA_TIME_FRAMING,
+ time_framing=time_frame,
network_browser_data_path=IMA_NETWORK_BROWSER_FILEPATH
)
@@ -274,9 +274,23 @@ def get_ima_log_checks():
return all_ima_healthchecks
-def get_migration_schedule() -> dict:
- return safe_load_yml(IMA_MIGRATION_PATH)[ENV_TYPE]
-
-
-def get_migration_ts(name: str) -> int:
- return get_migration_schedule().get(name, 0)
+def get_migration_ts(name: str, path: str = IMA_MIGRATION_PATH, env_type: str = ENV_TYPE) -> int:
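+ # A missing schedule file yields ts 0, i.e. the migration is treated as already passed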
+ if os.path.isfile(path):
+ schedule = safe_load_yml(path)[env_type]
+ return schedule.get(name, 0)
+ else:
+ return 0
+
+
+def get_ima_time_frame(name: str, after: bool = False) -> int:
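+ # 'after' selects the post-migration time frame; falls back to DEFAULT_TIME_FRAME if unset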
+ params = get_static_params()
+ if 'ima' not in params or 'time_frame' not in params['ima']:
+ logger.debug(
+ 'IMA time frame interval is not set. Using default value %d',
+ DEFAULT_TIME_FRAME
+ )
+ return DEFAULT_TIME_FRAME
+ if after:
+ return params['ima']['time_frame']['after']
+ else:
+ return params['ima']['time_frame']['before']
diff --git a/core/schains/monitor/__init__.py b/core/schains/monitor/__init__.py
index b8331a27e..ac76b3158 100644
--- a/core/schains/monitor/__init__.py
+++ b/core/schains/monitor/__init__.py
@@ -18,4 +18,5 @@
# along with this program. If not, see .
from .config_monitor import RegularConfigMonitor # noqa
+from .config_monitor import SyncConfigMonitor # noqa
from .skaled_monitor import get_skaled_monitor # noqa
diff --git a/core/schains/monitor/action.py b/core/schains/monitor/action.py
index 9ee78af1b..fb09d98c5 100644
--- a/core/schains/monitor/action.py
+++ b/core/schains/monitor/action.py
@@ -73,12 +73,16 @@
from core.schains.ima import ImaData
from core.schains.external_config import ExternalConfig, ExternalState
from core.schains.skaled_status import init_skaled_status
+from core.schains.ssl import update_ssl_change_date
-from tools.docker_utils import DockerUtils
-from tools.str_formatters import arguments_list_string
+from tools.configs import SYNC_NODE
from tools.configs.containers import IMA_CONTAINER, SCHAIN_CONTAINER
-
+from tools.docker_utils import DockerUtils
+from tools.helper import no_hyphens
+from tools.node_options import NodeOptions
from tools.notifications.messages import notify_repair_mode
+from tools.resources import get_statsd_client
+from tools.str_formatters import arguments_list_string
from web.models.schain import SChainRecord, upsert_schain_record
@@ -144,7 +148,8 @@ def __init__(
checks: ConfigChecks,
estate: ExternalState,
current_nodes: List[ExtendedManagerNodeInfo],
- econfig: Optional[ExternalConfig] = None
+ econfig: Optional[ExternalConfig] = None,
+ node_options: NodeOptions = None
):
self.skale = skale
self.schain = schain
@@ -158,9 +163,11 @@ def __init__(
self.rotation_id = rotation_data['rotation_id']
self.estate = estate
self.econfig = econfig or ExternalConfig(name=schain['name'])
+ self.node_options = node_options or NodeOptions()
self.cfm: ConfigFileManager = ConfigFileManager(
schain_name=self.schain['name']
)
+ self.statsd_client = get_statsd_client()
super().__init__(name=schain['name'])
@BaseActionManager.monitor_block
@@ -172,71 +179,73 @@ def config_dir(self) -> bool:
@BaseActionManager.monitor_block
def dkg(self) -> bool:
initial_status = self.checks.dkg.status
- if not initial_status:
- logger.info('Initing dkg client')
- dkg_client = get_dkg_client(
- skale=self.skale,
- node_id=self.node_config.id,
- schain_name=self.name,
- sgx_key_name=self.node_config.sgx_key_name,
- rotation_id=self.rotation_id
- )
- logger.info('Running run_dkg')
- dkg_result = run_dkg(
- dkg_client=dkg_client,
- skale=self.skale,
- schain_name=self.name,
- node_id=self.node_config.id,
- sgx_key_name=self.node_config.sgx_key_name,
- rotation_id=self.rotation_id
- )
- logger.info('DKG finished with %s', dkg_result)
- if dkg_result.status.is_done():
- save_dkg_results(
- dkg_result.keys_data,
- get_secret_key_share_filepath(self.name, self.rotation_id)
+ with self.statsd_client.timer(f'admin.action.dkg.{no_hyphens(self.name)}'):
+ if not initial_status:
+ logger.info('Initializing DKG client')
+ dkg_client = get_dkg_client(
+ skale=self.skale,
+ node_id=self.node_config.id,
+ schain_name=self.name,
+ sgx_key_name=self.node_config.sgx_key_name,
+ rotation_id=self.rotation_id
)
- self.schain_record.set_dkg_status(dkg_result.status)
- if not dkg_result.status.is_done():
- raise DkgError('DKG failed')
- else:
- logger.info('Dkg - ok')
+ logger.info('Running run_dkg')
+ dkg_result = run_dkg(
+ dkg_client=dkg_client,
+ skale=self.skale,
+ schain_name=self.name,
+ node_id=self.node_config.id,
+ sgx_key_name=self.node_config.sgx_key_name,
+ rotation_id=self.rotation_id
+ )
+ logger.info('DKG finished with %s', dkg_result)
+ if dkg_result.status.is_done():
+ save_dkg_results(
+ dkg_result.keys_data,
+ get_secret_key_share_filepath(self.name, self.rotation_id)
+ )
+ self.schain_record.set_dkg_status(dkg_result.status)
+ if not dkg_result.status.is_done():
+ raise DkgError('DKG failed')
+ else:
+ logger.info('Dkg - ok')
return initial_status
@BaseActionManager.monitor_block
def upstream_config(self) -> bool:
- logger.info(
- 'Creating new upstream_config rotation_id: %s, stream: %s',
- self.rotation_data.get('rotation_id'), self.stream_version
- )
- new_config = create_new_upstream_config(
- skale=self.skale,
- node_id=self.node_config.id,
- schain_name=self.name,
- generation=self.generation,
- ecdsa_sgx_key_name=self.node_config.sgx_key_name,
- rotation_data=self.rotation_data,
- stream_version=self.stream_version,
- schain_record=self.schain_record,
- file_manager=self.cfm
- )
-
- result = False
- if not self.cfm.upstream_config_exists() or new_config != self.cfm.latest_upstream_config:
- rotation_id = self.rotation_data['rotation_id']
+ with self.statsd_client.timer(f'admin.action.upstream_config.{no_hyphens(self.name)}'):
logger.info(
- 'Saving new upstream config rotation_id: %d, ips: %s',
- rotation_id,
- self.current_nodes
+ 'Creating new upstream_config rotation_id: %s, stream: %s',
+ self.rotation_data.get('rotation_id'), self.stream_version
+ )
+ new_config = create_new_upstream_config(
+ skale=self.skale,
+ node_config=self.node_config,
+ schain_name=self.name,
+ generation=self.generation,
+ ecdsa_sgx_key_name=self.node_config.sgx_key_name,
+ rotation_data=self.rotation_data,
+ sync_node=SYNC_NODE,
+ node_options=self.node_options
)
- self.cfm.save_new_upstream(rotation_id, new_config)
- result = True
- else:
- logger.info('Generated config is the same as latest upstream')
- update_schain_config_version(
- self.name, schain_record=self.schain_record)
- return result
+ result = False
+ if not self.cfm.upstream_config_exists() or \
+ new_config != self.cfm.latest_upstream_config:
+ rotation_id = self.rotation_data['rotation_id']
+ logger.info(
+ 'Saving new upstream config rotation_id: %d, ips: %s',
+ rotation_id,
+ self.current_nodes
+ )
+ self.cfm.save_new_upstream(rotation_id, new_config)
+ result = True
+ else:
+ logger.info('Generated config is the same as latest upstream')
+
+ update_schain_config_version(
+ self.name, schain_record=self.schain_record)
+ return result
@BaseActionManager.monitor_block
def reset_config_record(self) -> bool:
@@ -253,17 +262,30 @@ def external_state(self) -> bool:
return True
@BaseActionManager.monitor_block
- def update_reload_ts(self, ip_matched: bool) -> bool:
+ def update_reload_ts(self, ip_matched: bool, sync_node: bool = False) -> bool:
+ '''
+ - If ip_matched is True, the config is in sync and a skaled reload is not needed
+ - If ip_matched is False, the config is out of sync and a skaled reload is needed
+
+ For a sync node, node_index_in_group is always 0, so sync nodes are reloaded immediately
+ '''
logger.info('Setting reload_ts')
if ip_matched:
logger.info('Resetting reload_ts')
self.estate.reload_ts = None
self.econfig.update(self.estate)
return True
- node_index_in_group = get_node_index_in_group(self.skale, self.name, self.node_config.id)
- if node_index_in_group is None:
- logger.warning(f'node {self.node_config.id} is not in chain {self.name}')
- return False
+
+ node_index_in_group = 0
+ if not sync_node:
+ node_index_in_group = get_node_index_in_group(
+ self.skale,
+ self.name,
+ self.node_config.id
+ )
+ if node_index_in_group is None:
+ logger.warning(f'node {self.node_config.id} is not in chain {self.name}')
+ return False
self.estate.reload_ts = calc_reload_ts(self.current_nodes, node_index_in_group)
logger.info(f'Setting reload_ts to {self.estate.reload_ts}')
self.econfig.update(self.estate)
@@ -278,7 +300,8 @@ def __init__(
checks: SkaledChecks,
node_config: NodeConfig,
econfig: Optional[ExternalConfig] = None,
- dutils: DockerUtils = None
+ dutils: DockerUtils = None,
+ node_options: NodeOptions = None
):
self.schain = schain
self.generation = schain['generation']
@@ -295,6 +318,9 @@ def __init__(
self.esfm = ExitScheduleFileManager(schain['name'])
self.dutils = dutils or DockerUtils()
+ self.statsd_client = get_statsd_client()
+
+ self.node_options = node_options or NodeOptions()
super().__init__(name=schain['name'])
@@ -303,7 +329,7 @@ def volume(self) -> bool:
initial_status = self.checks.volume.status
if not initial_status:
logger.info('Creating volume')
- init_data_volume(self.schain, dutils=self.dutils)
+ init_data_volume(self.schain, sync_node=SYNC_NODE, dutils=self.dutils)
else:
logger.info('Volume - ok')
return initial_status
@@ -323,13 +349,18 @@ def firewall_rules(self, upstream: bool = False) -> bool:
ranges = self.econfig.ranges
logger.info('Adding ranges %s', ranges)
- self.rc.configure(
- base_port=base_port,
- own_ip=own_ip,
- node_ips=node_ips,
- sync_ip_ranges=ranges
- )
- self.rc.sync()
+ with self.statsd_client.timer(f'admin.action.firewall.{no_hyphens(self.name)}'):
+ self.rc.configure(
+ base_port=base_port,
+ own_ip=own_ip,
+ node_ips=node_ips,
+ sync_ip_ranges=ranges
+ )
+ self.statsd_client.gauge(
+ f'admin.action.expected_rules.{no_hyphens(self.name)}',
+ len(self.rc.expected_rules())
+ )
+ self.rc.sync()
return initial_status
@BaseActionManager.monitor_block
@@ -351,7 +382,9 @@ def skaled_container(
download_snapshot=download_snapshot,
start_ts=start_ts,
abort_on_exit=abort_on_exit,
- dutils=self.dutils
+ dutils=self.dutils,
+ sync_node=SYNC_NODE,
+ historic_state=self.node_options.historic_state
)
time.sleep(CONTAINER_POST_RUN_DELAY)
return True
@@ -363,9 +396,10 @@ def restart_skaled_container(self) -> bool:
logger.info('Skaled container exists, restarting')
restart_container(SCHAIN_CONTAINER, self.schain,
dutils=self.dutils)
+ update_ssl_change_date(self.schain_record)
else:
logger.info(
- 'Skaled container doesn\'t exists, running skaled watchman')
+ 'Skaled container does not exist, running skaled watchman')
initial_status = self.skaled_container()
return initial_status
@@ -394,7 +428,7 @@ def reloaded_skaled_container(self, abort_on_exit: bool = True) -> bool:
logger.info('Removing skaled container')
remove_schain_container(self.name, dutils=self.dutils)
else:
- logger.warning('Container doesn\'t exists')
+ logger.warning('Container does not exist')
self.schain_record.set_restart_count(0)
self.schain_record.set_failed_rpc_count(0)
self.schain_record.set_needs_reload(False)
@@ -443,7 +477,7 @@ def ima_container(self) -> bool:
migration_ts = get_ima_migration_ts(self.name)
logger.debug('Migration time for %s IMA - %d', self.name, migration_ts)
if not initial_status:
- pull_new_image(type=IMA_CONTAINER, dutils=self.dutils)
+ pull_new_image(image_type=IMA_CONTAINER, dutils=self.dutils)
ima_data = ImaData(
linked=self.econfig.ima_linked,
chain_id=self.econfig.chain_id
diff --git a/core/schains/monitor/config_monitor.py b/core/schains/monitor/config_monitor.py
index 3bd285ae3..47587a1bc 100644
--- a/core/schains/monitor/config_monitor.py
+++ b/core/schains/monitor/config_monitor.py
@@ -29,11 +29,7 @@
class BaseConfigMonitor(IMonitor):
- def __init__(
- self,
- action_manager: ConfigActionManager,
- checks: ConfigChecks
- ) -> None:
+ def __init__(self, action_manager: ConfigActionManager, checks: ConfigChecks) -> None:
self.am = action_manager
self.checks = checks
@@ -65,3 +61,15 @@ def execute(self) -> None:
self.am.upstream_config()
self.am.update_reload_ts(self.checks.skaled_node_ips)
self.am.reset_config_record()
+
+
+class SyncConfigMonitor(BaseConfigMonitor):
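+ """Config monitor for sync nodes: updates upstream config only when the last DKG succeeded."""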
+ def execute(self) -> None:
+ if not self.checks.config_dir:
+ self.am.config_dir()
+ if not self.checks.external_state:
+ self.am.external_state()
+ if self.checks.last_dkg_successful and not self.checks.upstream_config:
+ self.am.upstream_config()
+ self.am.update_reload_ts(self.checks.skaled_node_ips, sync_node=True)
+ self.am.reset_config_record()
diff --git a/core/schains/monitor/containers.py b/core/schains/monitor/containers.py
index 66ee3b488..001121395 100644
--- a/core/schains/monitor/containers.py
+++ b/core/schains/monitor/containers.py
@@ -19,10 +19,12 @@
import logging
import time
+from typing import Optional
from core.schains.volume import is_volume_exists
from core.schains.runner import (
get_container_image,
+ get_ima_container_time_frame,
get_image_name,
is_container_exists,
is_schain_container_failed,
@@ -32,14 +34,15 @@
run_schain_container
)
from core.ima.schain import copy_schain_ima_abi
-from core.schains.ima import ImaData
+from core.schains.ima import get_ima_time_frame, ImaData
+from core.schains.ssl import update_ssl_change_date
+from tools.configs import SYNC_NODE
from tools.configs.containers import (
MAX_SCHAIN_RESTART_COUNT,
SCHAIN_CONTAINER,
IMA_CONTAINER
)
-from tools.configs.ima import DISABLE_IMA
from tools.docker_utils import DockerUtils
@@ -53,13 +56,15 @@ def monitor_schain_container(
download_snapshot=False,
start_ts=None,
abort_on_exit: bool = True,
- dutils=None
+ dutils: Optional[DockerUtils] = None,
+ sync_node: bool = False,
+ historic_state: bool = False
) -> None:
dutils = dutils or DockerUtils()
schain_name = schain['name']
logger.info(f'Monitoring container for sChain {schain_name}')
- if not is_volume_exists(schain_name, dutils=dutils):
+ if not is_volume_exists(schain_name, sync_node=sync_node, dutils=dutils):
logger.error(f'Data volume for sChain {schain_name} does not exist')
return
@@ -76,9 +81,12 @@ def monitor_schain_container(
schain=schain,
download_snapshot=download_snapshot,
start_ts=start_ts,
+ dutils=dutils,
snapshot_from=schain_record.snapshot_from,
- dutils=dutils
+ sync_node=sync_node,
+ historic_state=historic_state,
)
+ update_ssl_change_date(schain_record)
schain_record.reset_failed_counters()
return
@@ -93,6 +101,7 @@ def monitor_schain_container(
if schain_record.restart_count < MAX_SCHAIN_RESTART_COUNT:
logger.info('sChain %s: restarting container', schain_name)
restart_container(SCHAIN_CONTAINER, schain, dutils=dutils)
+ update_ssl_change_date(schain_record)
schain_record.set_restart_count(schain_record.restart_count + 1)
schain_record.set_failed_rpc_count(0)
else:
@@ -113,8 +122,7 @@ def monitor_ima_container(
) -> None:
schain_name = schain["name"]
- if DISABLE_IMA:
- logger.info(f'{schain_name} - IMA is disabled, skipping')
+ if SYNC_NODE:
return
if not ima_data.linked:
@@ -125,29 +133,35 @@ def monitor_ima_container(
container_exists = is_container_exists(
schain_name, container_type=IMA_CONTAINER, dutils=dutils)
- container_image = get_container_image(schain_name, IMA_CONTAINER, dutils)
- new_image = get_image_name(type=IMA_CONTAINER, new=True)
-
- expected_image = get_image_name(type=IMA_CONTAINER)
- logger.debug('%s IMA image %s, expected %s', schain_name,
- container_image, expected_image)
if time.time() > migration_ts:
- logger.debug('%s IMA migration time passed', schain_name)
- expected_image = new_image
- if container_exists and expected_image != container_image:
- logger.info(
- '%s Removing old container as part of IMA migration', schain_name)
- remove_container(schain_name, IMA_CONTAINER, dutils)
- container_exists = False
+ logger.debug('IMA migration time passed')
+
+ image = get_image_name(image_type=IMA_CONTAINER, new=True)
+ time_frame = get_ima_time_frame(schain_name, after=True)
+ if container_exists:
+ container_image = get_container_image(schain_name, IMA_CONTAINER, dutils)
+ container_time_frame = get_ima_container_time_frame(schain_name, dutils)
+
+ if image != container_image or time_frame != container_time_frame:
+ logger.info('Removing old container as part of IMA migration')
+ remove_container(schain_name, IMA_CONTAINER, dutils)
+ container_exists = False
+ else:
+ time_frame = get_ima_time_frame(schain_name, after=False)
+ image = get_image_name(image_type=IMA_CONTAINER, new=False)
+ logger.debug('IMA time frame %d', time_frame)
if not container_exists:
- logger.info('%s No IMA container, creating, image %s',
- schain_name, expected_image)
+ logger.info(
+ '%s No IMA container, creating, image %s, time frame %d',
+ schain_name, image, time_frame
+ )
run_ima_container(
schain,
ima_data.chain_id,
- image=expected_image,
+ image=image,
+ time_frame=time_frame,
dutils=dutils
)
else:
diff --git a/core/schains/monitor/main.py b/core/schains/monitor/main.py
index 417c56e96..58010d347 100644
--- a/core/schains/monitor/main.py
+++ b/core/schains/monitor/main.py
@@ -31,29 +31,23 @@
from core.node import get_skale_node_version
from core.node_config import NodeConfig
-from core.schains.checks import (
- ConfigChecks,
- get_api_checks_status,
- TG_ALLOWED_CHECKS,
- SkaledChecks
-)
+from core.schains.checks import ConfigChecks, get_api_checks_status, TG_ALLOWED_CHECKS, SkaledChecks
from core.schains.config.file_manager import ConfigFileManager
from core.schains.firewall import get_default_rule_controller
from core.schains.firewall.utils import get_sync_agent_ranges
-from core.schains.monitor import (
- get_skaled_monitor,
- RegularConfigMonitor
-)
+from core.schains.monitor import get_skaled_monitor, RegularConfigMonitor, SyncConfigMonitor
from core.schains.monitor.action import ConfigActionManager, SkaledActionManager
from core.schains.external_config import ExternalConfig, ExternalState
from core.schains.task import keep_tasks_running, Task
from core.schains.config.static_params import get_automatic_repair_option
from core.schains.skaled_status import get_skaled_status
from core.node import get_current_nodes
+
from tools.docker_utils import DockerUtils
-from tools.configs.ima import DISABLE_IMA
+from tools.configs import SYNC_NODE
from tools.notifications.messages import notify_checks
-from tools.helper import is_node_part_of_chain
+from tools.helper import is_node_part_of_chain, no_hyphens
+from tools.resources import get_statsd_client
from web.models.schain import SChainRecord
@@ -67,23 +61,19 @@
def run_config_pipeline(
- skale: Skale,
- skale_ima: SkaleIma,
- schain: Dict,
- node_config: NodeConfig,
- stream_version: str
+ skale: Skale, skale_ima: SkaleIma, schain: Dict, node_config: NodeConfig, stream_version: str
) -> None:
name = schain['name']
schain_record = SChainRecord.get_by_name(name)
rotation_data = skale.node_rotation.get_rotation(name)
allowed_ranges = get_sync_agent_ranges(skale)
- ima_linked = not DISABLE_IMA and skale_ima.linker.has_schain(name)
+ ima_linked = not SYNC_NODE and skale_ima.linker.has_schain(name)
+ group_index = skale.schains.name_to_group_id(name)
+ last_dkg_successful = skale.dkg.is_last_dkg_successful(group_index)
current_nodes = get_current_nodes(skale, name)
estate = ExternalState(
- ima_linked=ima_linked,
- chain_id=skale_ima.web3.eth.chain_id,
- ranges=allowed_ranges
+ ima_linked=ima_linked, chain_id=skale_ima.web3.eth.chain_id, ranges=allowed_ranges
)
econfig = ExternalConfig(name)
config_checks = ConfigChecks(
@@ -93,8 +83,9 @@ def run_config_pipeline(
stream_version=stream_version,
rotation_id=rotation_data['rotation_id'],
current_nodes=current_nodes,
+ last_dkg_successful=last_dkg_successful,
econfig=econfig,
- estate=estate
+ estate=estate,
)
config_am = ConfigActionManager(
@@ -106,23 +97,38 @@ def run_config_pipeline(
checks=config_checks,
current_nodes=current_nodes,
estate=estate,
- econfig=econfig
+ econfig=econfig,
)
- status = config_checks.get_all(log=False)
+ status = config_checks.get_all(log=False, expose=True)
logger.info('Config checks: %s', status)
- mon = RegularConfigMonitor(config_am, config_checks)
- mon.run()
+
+ if SYNC_NODE:
+ logger.info(
+ 'Sync node last_dkg_successful %s, rotation_data %s',
+ last_dkg_successful,
+ rotation_data
+ )
+ mon = SyncConfigMonitor(config_am, config_checks)
+ else:
+ logger.info('Regular node mode, running config monitor')
+ mon = RegularConfigMonitor(config_am, config_checks)
+ statsd_client = get_statsd_client()
+
+ statsd_client.incr(f'admin.config_pipeline.{mon.__class__.__name__}.{no_hyphens(name)}')
+ statsd_client.gauge(
+ f'admin.config_pipeline.rotation_id.{no_hyphens(name)}', rotation_data['rotation_id']
+ )
+ with statsd_client.timer(f'admin.config_pipeline.duration.{no_hyphens(name)}'):
+ mon.run()
def run_skaled_pipeline(
- skale: Skale,
- schain: Dict,
- node_config: NodeConfig,
- dutils: DockerUtils
+ skale: Skale, schain: Dict, node_config: NodeConfig, dutils: DockerUtils
) -> None:
name = schain['name']
schain_record = SChainRecord.get_by_name(name)
+ logger.info('Record: %s', SChainRecord.to_dict(schain_record))
dutils = dutils or DockerUtils()
@@ -131,7 +137,8 @@ def run_skaled_pipeline(
schain_name=schain['name'],
schain_record=schain_record,
rule_controller=rc,
- dutils=dutils
+ dutils=dutils,
+ sync_node=SYNC_NODE,
)
skaled_status = get_skaled_status(name)
@@ -142,34 +149,36 @@ def run_skaled_pipeline(
checks=skaled_checks,
node_config=node_config,
econfig=ExternalConfig(name),
- dutils=dutils
+ dutils=dutils,
)
- status = skaled_checks.get_all(log=False)
+ status = skaled_checks.get_all(log=False, expose=True)
automatic_repair = get_automatic_repair_option()
- api_status = get_api_checks_status(
- status=status, allowed=TG_ALLOWED_CHECKS)
+ api_status = get_api_checks_status(status=status, allowed=TG_ALLOWED_CHECKS)
notify_checks(name, node_config.all(), api_status)
logger.info('Skaled status: %s', status)
logger.info('Upstream config %s', skaled_am.upstream_config_path)
+
mon = get_skaled_monitor(
action_manager=skaled_am,
status=status,
schain_record=schain_record,
skaled_status=skaled_status,
- automatic_repair=automatic_repair
+ automatic_repair=automatic_repair,
)
- mon(skaled_am, skaled_checks).run()
+
+ statsd_client = get_statsd_client()
+ statsd_client.incr(f'admin.skaled_pipeline.{mon.__name__}.{no_hyphens(name)}')
+ with statsd_client.timer(f'admin.skaled_pipeline.duration.{no_hyphens(name)}'):
+ mon(skaled_am, skaled_checks).run()
def post_monitor_sleep():
schain_monitor_sleep = random.randint(
- MIN_SCHAIN_MONITOR_SLEEP_INTERVAL,
- MAX_SCHAIN_MONITOR_SLEEP_INTERVAL
+ MIN_SCHAIN_MONITOR_SLEEP_INTERVAL, MAX_SCHAIN_MONITOR_SLEEP_INTERVAL
)
- logger.info('Monitor iteration completed, sleeping for %d',
- schain_monitor_sleep)
+ logger.info('Monitor iteration completed, sleeping for %d', schain_monitor_sleep)
time.sleep(schain_monitor_sleep)
@@ -182,22 +191,30 @@ def create_and_execute_tasks(
schain_record,
executor,
futures,
- dutils
+ dutils,
):
reload(web3_request)
name = schain['name']
is_rotation_active = skale.node_rotation.is_rotation_active(name)
- leaving_chain = not is_node_part_of_chain(skale, name, node_config.id)
+ leaving_chain = not SYNC_NODE and not is_node_part_of_chain(skale, name, node_config.id)
if leaving_chain and not is_rotation_active:
logger.info('Not on node (%d), finishing process', node_config.id)
return True
logger.info(
'sync_config_run %s, config_version %s, stream_version %s',
- schain_record.sync_config_run, schain_record.config_version, stream_version
+ schain_record.sync_config_run,
+ schain_record.config_version,
+ stream_version,
)
+
+ statsd_client = get_statsd_client()
+ monitor_last_seen_ts = schain_record.monitor_last_seen.timestamp()
+ statsd_client.incr(f'admin.schain.monitor.{no_hyphens(name)}')
+ statsd_client.gauge(f'admin.schain.monitor_last_seen.{no_hyphens(name)}', monitor_last_seen_ts)
+
tasks = []
if not leaving_chain:
logger.info('Adding config task to the pool')
@@ -210,12 +227,14 @@ def create_and_execute_tasks(
skale_ima=skale_ima,
schain=schain,
node_config=node_config,
- stream_version=stream_version
+ stream_version=stream_version,
),
- sleep=CONFIG_PIPELINE_SLEEP
- ))
- if schain_record.config_version != stream_version or \
- (schain_record.sync_config_run and schain_record.first_run):
+ sleep=CONFIG_PIPELINE_SLEEP,
+ )
+ )
+ if schain_record.config_version != stream_version or (
+ schain_record.sync_config_run and schain_record.first_run
+ ):
ConfigFileManager(name).remove_skaled_config()
else:
logger.info('Adding skaled task to the pool')
@@ -227,10 +246,11 @@ def create_and_execute_tasks(
skale=skale,
schain=schain,
node_config=node_config,
- dutils=dutils
+ dutils=dutils,
),
- sleep=SKALED_PIPELINE_SLEEP
- ))
+ sleep=SKALED_PIPELINE_SLEEP,
+ )
+ )
if len(tasks) == 0:
logger.warning('No tasks to run')
@@ -238,12 +258,7 @@ def create_and_execute_tasks(
def run_monitor_for_schain(
- skale,
- skale_ima,
- node_config: NodeConfig,
- schain,
- dutils=None,
- once=False
+ skale, skale_ima, node_config: NodeConfig, schain, dutils=None, once=False
):
stream_version = get_skale_node_version()
tasks_number = 2
@@ -261,7 +276,7 @@ def run_monitor_for_schain(
schain_record,
executor,
futures,
- dutils
+ dutils,
)
if once:
return True
diff --git a/core/schains/monitor/skaled_monitor.py b/core/schains/monitor/skaled_monitor.py
index fe94ca4cc..aac40e16a 100644
--- a/core/schains/monitor/skaled_monitor.py
+++ b/core/schains/monitor/skaled_monitor.py
@@ -27,6 +27,9 @@
from core.schains.monitor.action import SkaledActionManager
from core.schains.config.main import get_number_of_secret_shares
from core.schains.skaled_status import SkaledStatus
+from core.schains.ssl import ssl_reload_needed
+from tools.configs import SYNC_NODE
+from tools.resources import get_statsd_client
from web.models.schain import SChainRecord
@@ -41,6 +44,7 @@ def __init__(
) -> None:
self.am = action_manager
self.checks = checks
+ self.statsd_client = get_statsd_client()
@abstractmethod
def execute(self) -> None:
@@ -72,7 +76,7 @@ def execute(self) -> None:
self.am.reset_restart_counter()
if not self.checks.rpc:
self.am.skaled_rpc()
- if not self.checks.ima_container:
+ if not self.checks.ima_container and not SYNC_NODE:
self.am.ima_container()
@@ -267,8 +271,8 @@ def is_config_update_time(
return not status['skaled_container'] and skaled_status.exit_time_reached
-def is_recreate_mode(schain_record: SChainRecord) -> bool:
- return schain_record.needs_reload
+def is_recreate_mode(status: Dict, schain_record: SChainRecord) -> bool:
+ return status['skaled_container'] and ssl_reload_needed(schain_record)
def is_new_node_mode(schain_record: SChainRecord, finish_ts: Optional[int]) -> bool:
@@ -303,13 +307,27 @@ def get_skaled_monitor(
skaled_status.log()
mon_type: Type[BaseSkaledMonitor] = RegularSkaledMonitor
+
+ if SYNC_NODE:
+ if no_config(status):
+ mon_type = NoConfigSkaledMonitor
+ if is_recreate_mode(status, schain_record):
+ mon_type = RecreateSkaledMonitor
+ elif is_config_update_time(status, skaled_status):
+ mon_type = UpdateConfigSkaledMonitor
+ elif is_reload_group_mode(status, action_manager.upstream_finish_ts):
+ mon_type = ReloadGroupSkaledMonitor
+ elif is_reload_ip_mode(status, action_manager.econfig.reload_ts):
+ mon_type = ReloadIpSkaledMonitor
+ return mon_type
+
if no_config(status):
mon_type = NoConfigSkaledMonitor
elif is_backup_mode(schain_record):
mon_type = BackupSkaledMonitor
elif is_repair_mode(schain_record, status, skaled_status, automatic_repair):
mon_type = RepairSkaledMonitor
- elif is_recreate_mode(schain_record):
+ elif is_recreate_mode(status, schain_record):
mon_type = RecreateSkaledMonitor
elif is_new_node_mode(schain_record, action_manager.finish_ts):
mon_type = NewNodeSkaledMonitor
diff --git a/core/schains/process_manager.py b/core/schains/process_manager.py
index 2397bed77..fddaa6a4d 100644
--- a/core/schains/process_manager.py
+++ b/core/schains/process_manager.py
@@ -19,6 +19,7 @@
import sys
import logging
+from typing import Dict
from multiprocessing import Process
from skale import Skale
@@ -60,34 +61,37 @@ def run_process_manager(skale, skale_ima, node_config):
notify_if_not_enough_balance(skale, node_info)
schains_to_monitor = fetch_schains_to_monitor(skale, node_id)
-
for schain in schains_to_monitor:
- schain_record = upsert_schain_record(schain['name'])
- log_prefix = f'sChain {schain["name"]} -' # todo - move to logger formatter
-
- terminate_stuck_schain_process(skale, schain_record, schain)
- monitor_process_alive = is_monitor_process_alive(schain_record.monitor_id)
-
- if not monitor_process_alive:
- logger.info(f'{log_prefix} PID {schain_record.monitor_id} is not running, spawning...')
- process = Process(
- name=schain['name'],
- target=run_monitor_for_schain,
- args=(
- skale,
- skale_ima,
- node_config,
- schain
- )
- )
- process.start()
- schain_record.set_monitor_id(process.ident)
- logger.info(f'{log_prefix} Process started: PID = {process.ident}')
- else:
- logger.info(f'{log_prefix} Process is running: PID = {schain_record.monitor_id}')
+ run_pm_schain(skale, skale_ima, node_config, schain)
logger.info('Process manager procedure finished')
+def run_pm_schain(skale, skale_ima, node_config, schain: Dict) -> None:
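+ # Spawn a monitor process for the chain unless one is already running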
+ schain_record = upsert_schain_record(schain['name'])
+ log_prefix = f'sChain {schain["name"]} -' # todo - move to logger formatter
+
+ terminate_stuck_schain_process(skale, schain_record, schain)
+ monitor_process_alive = is_monitor_process_alive(schain_record.monitor_id)
+
+ if not monitor_process_alive:
+ logger.info(f'{log_prefix} PID {schain_record.monitor_id} is not running, spawning...')
+ process = Process(
+ name=schain['name'],
+ target=run_monitor_for_schain,
+ args=(
+ skale,
+ skale_ima,
+ node_config,
+ schain
+ )
+ )
+ process.start()
+ schain_record.set_monitor_id(process.ident)
+ logger.info(f'{log_prefix} Process started: PID = {process.ident}')
+ else:
+ logger.info(f'{log_prefix} Process is running: PID = {schain_record.monitor_id}')
+
+
def fetch_schains_to_monitor(skale: Skale, node_id: int) -> list:
"""
Returns list of sChain dicts that admin should monitor (currently assigned + rotating).
diff --git a/core/schains/rpc.py b/core/schains/rpc.py
index 10519e14b..a9279e265 100644
--- a/core/schains/rpc.py
+++ b/core/schains/rpc.py
@@ -17,6 +17,8 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see .
+import json
+import logging
import time
from tools.configs import ALLOWED_TIMESTAMP_DIFF
@@ -24,6 +26,9 @@
from tools.helper import post_request
+logger = logging.getLogger(__name__)
+
+
def make_rpc_call(http_endpoint, method, params=None, timeout=None) -> bool:
params = params or []
return post_request(
@@ -47,10 +52,16 @@ def check_endpoint_alive(http_endpoint, timeout=None):
def check_endpoint_blocks(http_endpoint):
res = make_rpc_call(http_endpoint, 'eth_getBlockByNumber', ['latest', False])
- if res and res.json():
- res_data = res.json()
- latest_schain_timestamp_hex = res_data['result']['timestamp']
- latest_schain_timestamp = int(latest_schain_timestamp_hex, 16)
- admin_timestamp = int(time.time())
- return abs(latest_schain_timestamp - admin_timestamp) < ALLOWED_TIMESTAMP_DIFF
- return False
+ healthy = False
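+ # Healthy only if the latest block timestamp is within ALLOWED_TIMESTAMP_DIFF of the admin clock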
+ if res:
+ try:
+ res_data = res.json()
+ latest_schain_timestamp_hex = res_data['result']['timestamp']
+ latest_schain_timestamp = int(latest_schain_timestamp_hex, 16)
+ admin_timestamp = int(time.time())
+ healthy = abs(latest_schain_timestamp - admin_timestamp) < ALLOWED_TIMESTAMP_DIFF
+ except (json.JSONDecodeError, KeyError, ValueError) as e:
+ logger.warning('Failed to parse response, error: %s', e)
+ else:
+ logger.warning('Empty response from skaled')
+ return healthy
diff --git a/core/schains/runner.py b/core/schains/runner.py
index 15cf2fa51..e65aa6394 100644
--- a/core/schains/runner.py
+++ b/core/schains/runner.py
@@ -33,16 +33,18 @@
from tools.docker_utils import DockerUtils
from tools.str_formatters import arguments_list_string
from tools.configs.containers import (
- CONTAINERS_INFO,
CONTAINER_NAME_PREFIX,
DATA_DIR_CONTAINER_PATH,
IMA_CONTAINER,
+ HISTORIC_STATE_IMAGE_POSTFIX,
SCHAIN_CONTAINER,
- SCHAIN_STOP_TIMEOUT
+ SCHAIN_STOP_TIMEOUT,
+ CONTAINERS_INFO
)
from tools.configs import (NODE_DATA_PATH_HOST, SCHAIN_NODE_DATA_PATH, SKALE_DIR_HOST,
SKALE_VOLUME_PATH, SCHAIN_CONFIG_DIR_SKALED)
+
logger = logging.getLogger(__name__)
@@ -76,31 +78,33 @@ def is_container_running(
return dutils.is_container_running(container_name)
-def get_image_name(type: str, new: bool = False) -> str:
+def get_image_name(image_type: str, new: bool = False, historic_state: bool = False) -> str:
tag_field = 'version'
- if type == IMA_CONTAINER and new:
+ if image_type == IMA_CONTAINER and new:
tag_field = 'new_version'
- container_info = CONTAINERS_INFO[type]
- image_base_name = container_info['name']
- tag = container_info[tag_field]
- return f'{image_base_name}:{tag}'
+ container_info = CONTAINERS_INFO[image_type]
+ image_name = f'{container_info["name"]}:{container_info[tag_field]}'
+ if historic_state and image_type == SCHAIN_CONTAINER:
+ image_name += HISTORIC_STATE_IMAGE_POSTFIX
+ return image_name
-def get_container_name(type, schain_name):
- return f"{CONTAINER_NAME_PREFIX}_{type}_{schain_name}"
+def get_container_name(image_type: str, schain_name: str) -> str:
+ return f"{CONTAINER_NAME_PREFIX}_{image_type}_{schain_name}"
-def get_container_args(type):
- return copy.deepcopy(CONTAINERS_INFO[type]['args'])
+def get_container_args(image_type: str) -> dict:
+ return copy.deepcopy(CONTAINERS_INFO[image_type]['args'])
-def get_container_custom_args(type):
- return copy.deepcopy(CONTAINERS_INFO[type]['custom_args'])
+def get_container_custom_args(image_type):
+ return copy.deepcopy(CONTAINERS_INFO[image_type]['custom_args'])
-def get_container_info(type, schain_name):
- return (get_image_name(type), get_container_name(type, schain_name),
- get_container_args(type), get_container_custom_args(type))
+def get_container_info(image_type: str, schain_name: str, historic_state: bool = False):
+ return (get_image_name(image_type=image_type, historic_state=historic_state),
+ get_container_name(image_type=image_type, schain_name=schain_name),
+ get_container_args(image_type=image_type), get_container_custom_args(image_type))
def get_logs_config(config):
@@ -113,7 +117,7 @@ def get_ulimits_config(config):
def run_container(
- type,
+ image_type,
schain_name,
env,
cmd=None,
@@ -122,11 +126,12 @@ def run_container(
mem_limit=None,
image=None,
dutils=None,
- volume_mode=None
+ volume_mode=None,
+ historic_state=False
):
dutils = dutils or DockerUtils()
default_image, container_name, run_args, custom_args = get_container_info(
- type, schain_name)
+ image_type, schain_name, historic_state=historic_state)
image_name = image or default_image
@@ -177,18 +182,22 @@ def run_schain_container(
dutils=None,
volume_mode=None,
ulimit_check=True,
+ enable_ssl=True,
snapshot_from: str = '',
- enable_ssl=True
+ sync_node=False,
+ historic_state=False
):
schain_name = schain['name']
schain_type = get_schain_type(schain['partOfNode'])
- cpu_limit = get_schain_limit(schain_type, MetricType.cpu_shares)
- mem_limit = get_schain_limit(schain_type, MetricType.mem)
+
+ cpu_limit = None if sync_node else get_schain_limit(schain_type, MetricType.cpu_shares)
+ mem_limit = None if sync_node else get_schain_limit(schain_type, MetricType.mem)
volume_config = get_schain_volume_config(
schain_name,
DATA_DIR_CONTAINER_PATH,
- mode=volume_mode
+ mode=volume_mode,
+ sync_node=sync_node
)
env = get_schain_env(ulimit_check=ulimit_check)
@@ -197,28 +206,39 @@ def run_schain_container(
start_ts,
download_snapshot=download_snapshot,
enable_ssl=enable_ssl,
+ sync_node=sync_node,
snapshot_from=snapshot_from
)
- run_container(SCHAIN_CONTAINER, schain_name, env, cmd,
- volume_config, cpu_limit,
- mem_limit, dutils=dutils, volume_mode=volume_mode)
+ run_container(
+ SCHAIN_CONTAINER,
+ schain_name,
+ env,
+ cmd,
+ volume_config,
+ cpu_limit,
+ mem_limit,
+ volume_mode=volume_mode,
+ historic_state=historic_state,
+ dutils=dutils
+ )
def run_ima_container(
schain: dict,
mainnet_chain_id: int,
+ time_frame: int,
image: str,
dutils: DockerUtils = None
) -> None:
dutils = dutils or DockerUtils()
- env = get_ima_env(schain['name'], mainnet_chain_id)
+ env = get_ima_env(schain['name'], mainnet_chain_id, time_frame)
schain_type = get_schain_type(schain['partOfNode'])
cpu_limit = get_ima_limit(schain_type, MetricType.cpu_shares)
mem_limit = get_ima_limit(schain_type, MetricType.mem)
run_container(
- type=IMA_CONTAINER,
+ image_type=IMA_CONTAINER,
schain_name=schain['name'],
env=env.to_dict(),
cpu_shares_limit=cpu_limit,
@@ -275,18 +295,23 @@ def is_schain_container_failed(
return bad_state
-def is_new_image_pulled(type: str, dutils: DockerUtils) -> bool:
- image = get_image_name(type, new=True)
+def is_new_image_pulled(image_type: str, dutils: DockerUtils) -> bool:
+ image = get_image_name(image_type, new=True)
return dutils.pulled(image)
-def remove_container(schain_name: str, type: str, dutils: DockerUtils):
- container = get_container_name(type=type, schain_name=schain_name)
+def remove_container(schain_name: str, image_type: str, dutils: DockerUtils):
+ container = get_container_name(image_type=image_type, schain_name=schain_name)
dutils.safe_rm(container)
-def pull_new_image(type: str, dutils: DockerUtils) -> None:
- image = get_image_name(type, new=True)
+def pull_new_image(image_type: str, dutils: DockerUtils) -> None:
+ image = get_image_name(image_type, new=True)
if not dutils.pulled(image):
logger.info('Pulling new image %s', image)
dutils.pull(image)
+
+
+def get_ima_container_time_frame(schain_name: str, dutils: DockerUtils) -> int:
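+ # Read the TIME_FRAMING env value from the running IMA container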
+ container_name = get_container_name(IMA_CONTAINER, schain_name)
+ return int(dutils.get_container_env_value(container_name, 'TIME_FRAMING'))
diff --git a/core/schains/ssl.py b/core/schains/ssl.py
index f1d86a249..d5470b017 100644
--- a/core/schains/ssl.py
+++ b/core/schains/ssl.py
@@ -18,11 +18,18 @@
# along with this program. If not, see .
import os
-from tools.configs import SSL_CERTIFICATES_FILEPATH
+import logging
+from datetime import datetime
+from typing import Optional
+
+from web.models.schain import SChainRecord
+from tools.configs import SSL_CERTIFICATES_FILEPATH, SSL_CERT_PATH
+
+
+logger = logging.getLogger(__name__)
def is_ssl_folder_empty(ssl_path=SSL_CERTIFICATES_FILEPATH):
- return len(os.listdir(SSL_CERTIFICATES_FILEPATH)) == 0
+ return len(os.listdir(ssl_path)) == 0
def get_ssl_filepath():
@@ -31,3 +38,31 @@ def get_ssl_filepath():
else:
return os.path.join(SSL_CERTIFICATES_FILEPATH, 'ssl_key'), \
os.path.join(SSL_CERTIFICATES_FILEPATH, 'ssl_cert')
+
+
+def get_ssl_files_change_date() -> Optional[datetime]:
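+ # Returns None when the SSL folder is empty (no certificates installed yet)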
+ if is_ssl_folder_empty():
+ return
+ ssl_changed_ts = os.path.getmtime(SSL_CERT_PATH)
+ return datetime.utcfromtimestamp(ssl_changed_ts)
+
+
+def update_ssl_change_date(schain_record: SChainRecord) -> bool:
+ ssl_files_change_date = get_ssl_files_change_date()
+ if not ssl_files_change_date:
+ logger.warning(
+ f'Tried to update SSL change date for {schain_record.name}, but no SSL files found')
+ return False
+ schain_record.set_ssl_change_date(ssl_files_change_date)
+ return True
+
+
+def ssl_reload_needed(schain_record: SChainRecord) -> bool:
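+ # Compare the current certificate mtime with the change date recorded for the chain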
+ ssl_files_change_date = get_ssl_files_change_date()
+ if not ssl_files_change_date:
+ logger.warning(
+ f'Tried to get SSL change date for {schain_record.name}, but no SSL files found')
+ return False
+ logger.info(
+ f'ssl_files_change_date: {ssl_files_change_date}, '
+ f'ssl_change_date for chain {schain_record.name}: {schain_record.ssl_change_date}'
+ )
+ return ssl_files_change_date != schain_record.ssl_change_date
diff --git a/core/schains/types.py b/core/schains/types.py
index 8de4ce8ec..0982401c4 100644
--- a/core/schains/types.py
+++ b/core/schains/types.py
@@ -26,6 +26,7 @@ class SchainType(Enum):
large = 128
medium = 16
small = 1
+ sync_node = 314
class ContainerType(Enum):
diff --git a/core/schains/volume.py b/core/schains/volume.py
index ef9f5b114..dbba93cd4 100644
--- a/core/schains/volume.py
+++ b/core/schains/volume.py
@@ -18,9 +18,12 @@
# along with this program. If not, see .
import logging
+import os
+import shutil
from core.schains.limits import get_schain_limit, get_schain_type
from core.schains.types import MetricType
+from tools.configs.schains import SCHAIN_STATE_PATH, SCHAIN_STATIC_PATH
from tools.configs.containers import (
SHARED_SPACE_VOLUME_NAME,
SHARED_SPACE_CONTAINER_PATH
@@ -31,30 +34,70 @@
logger = logging.getLogger(__name__)
-def is_volume_exists(schain_name, dutils=None):
+def is_volume_exists(schain_name, sync_node=False, dutils=None):
dutils = dutils or DockerUtils()
- return dutils.is_data_volume_exists(schain_name)
+ if sync_node:
+ schain_state = os.path.join(SCHAIN_STATE_PATH, schain_name)
+ schain_static_path = os.path.join(SCHAIN_STATIC_PATH, schain_name)
+ return os.path.isdir(schain_state) and os.path.islink(schain_static_path)
+ else:
+ return dutils.is_data_volume_exists(schain_name)
-def init_data_volume(schain, dutils=None):
+def init_data_volume(
+ schain: dict,
+ sync_node: bool = False,
+ dutils: DockerUtils = None
+):
dutils = dutils or DockerUtils()
schain_name = schain['name']
- if dutils.is_data_volume_exists(schain_name):
+ if is_volume_exists(schain_name, sync_node=sync_node, dutils=dutils):
logger.debug(f'Volume already exists: {schain_name}')
return
logger.info(f'Creating volume for schain: {schain_name}')
- schain_type = get_schain_type(schain['partOfNode'])
- disk_limit = get_schain_limit(schain_type, MetricType.disk)
- return dutils.create_data_volume(schain_name, disk_limit)
+ if sync_node:
+ ensure_data_dir_path(schain['name'])
+ else:
+ schain_type = get_schain_type(schain['partOfNode'])
+ disk_limit = get_schain_limit(schain_type, MetricType.disk)
+ dutils.create_data_volume(schain_name, disk_limit)
-def get_schain_volume_config(name, mount_path, mode=None):
+def remove_data_dir(schain_name):
+ schain_state = os.path.join(SCHAIN_STATE_PATH, schain_name)
+ schain_static_path = os.path.join(SCHAIN_STATIC_PATH, schain_name)
+ os.remove(schain_static_path)
+ shutil.rmtree(schain_state)
+
+
+def ensure_data_dir_path(schain_name: str) -> None:
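+ # Create the state dir and (re)link the static path to its filestorage subdirectory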
+ schain_state = os.path.join(SCHAIN_STATE_PATH, schain_name)
+ os.makedirs(schain_state, exist_ok=True)
+ schain_filestorage_state = os.path.join(schain_state, 'filestorage')
+ schain_static_path = os.path.join(SCHAIN_STATIC_PATH, schain_name)
+ if os.path.islink(schain_static_path):
+ os.unlink(schain_static_path)
+ os.symlink(
+ schain_filestorage_state,
+ schain_static_path,
+ target_is_directory=True
+ )
+
+
+def get_schain_volume_config(name, mount_path, mode=None, sync_node=False):
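+ # Sync nodes bind host directories; regular nodes mount named docker volumes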
mode = mode or 'rw'
+ if sync_node:
+ datadir_src = os.path.join(SCHAIN_STATE_PATH, name)
+ shared_space_src = os.path.join(SCHAIN_STATE_PATH, SHARED_SPACE_VOLUME_NAME)
+ else:
+ datadir_src = name
+ shared_space_src = SHARED_SPACE_VOLUME_NAME
+
config = {
- f'{name}': {'bind': mount_path, 'mode': mode},
- SHARED_SPACE_VOLUME_NAME: {
+ datadir_src: {'bind': mount_path, 'mode': mode},
+ shared_space_src: {
'bind': SHARED_SPACE_CONTAINER_PATH,
'mode': mode
}
diff --git a/pytest.ini b/pytest.ini
index 5785bf3fe..2b9973db7 100644
--- a/pytest.ini
+++ b/pytest.ini
@@ -1,6 +1,6 @@
[pytest]
log_cli = 0
log_cli_level = INFO
-log_cli_format = %(asctime)s [%(levelname)8s] %(message)s (%(filename)s:%(lineno)s)
+log_cli_format = [%(asctime)s] [%(levelname)8s] [%(threadName)s] %(message)s (%(filename)s:%(lineno)s)
log_cli_date_format=%Y-%m-%d %H:%M:%S
filterwarnings = ignore::DeprecationWarning
diff --git a/requirements.txt b/requirements.txt
index fc90bc453..27e560567 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,7 +1,7 @@
peewee==3.9.5
Flask==2.3.3
-Werkzeug==2.3.7
-gunicorn==20.1.0
+Werkzeug==3.0.3
+gunicorn==22.0.0
Jinja2==3.1.2
@@ -10,14 +10,16 @@ python-iptables==1.0.1
skale.py==6.2b0
+requests==2.31
ima-predeployed==2.1.0b0
etherbase-predeployed==1.1.0b3
marionette-predeployed==2.0.0b2
-config-controller-predeployed==1.0.1.dev2
+config-controller-predeployed==1.1.0
filestorage-predeployed==1.1.0.dev8
multisigwallet-predeployed==1.1.0a8
context-predeployed==1.0.0.dev3
+statsd==4.0.1
psutil==5.9.3
diff --git a/scripts/helper.sh b/scripts/helper.sh
index cfbb9b715..b8f15641a 100644
--- a/scripts/helper.sh
+++ b/scripts/helper.sh
@@ -2,13 +2,13 @@
export_test_env () {
export SKALE_DIR_HOST=$PWD/tests/skale-data
+ export SKALE_LIB_PATH=$PWD/tests/skale-data/lib
export RUNNING_ON_HOST=True
export PYTHONPATH=${PYTHONPATH}:.
export ENV=dev
export SGX_CERTIFICATES_FOLDER=$PWD/tests/skale-data/node_data/sgx_certs
export SGX_SERVER_URL=https://localhost:1026
export ENDPOINT=http://localhost:8545
- export IMA_ENDPOINT=http://localhost:1000
export DB_USER=user
export DB_PASSWORD=pass
export DB_PORT=3307
@@ -23,6 +23,7 @@ export_test_env () {
export SCHAIN_STOP_TIMEOUT=1
export ABI_FILEPATH=${ABI_FILEPATH="$PWD/helper-scripts/contracts_data/manager.json"}
export IMA_ABI_FILEPATH=${IMA_ABI_FILEPATH}
+ export DEFAULT_GAS_PRICE_WEI=1000000000
cp $PWD/helper-scripts/contracts_data/ima.json $SKALE_DIR_HOST/contracts_info
}
@@ -31,6 +32,7 @@ export_test_env () {
tests_cleanup () {
export_test_env
docker rm -f skale_schain_test && docker volume rm test || true
+ sudo rm -r tests/skale-data/lib || true
rm tests/skale-data/node_data/node_config.json || true
docker rm -f sgx-simulator || true
docker rm -f skale_schain_test1 skale_schain_test2 skale_schain_test3 || true
diff --git a/scripts/run_core_tests.sh b/scripts/run_core_tests.sh
index 9987b4af2..ae867c129 100755
--- a/scripts/run_core_tests.sh
+++ b/scripts/run_core_tests.sh
@@ -1,5 +1,5 @@
#!/usr/bin/env bash
-set -e
+set -ea
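+# -e: abort on the first failing command; -a: export every variable assigned below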
export DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
source $DIR/helper.sh
@@ -15,5 +15,3 @@ bash scripts/run_redis.sh
py.test --cov-config=.coveragerc --cov=. tests/ --ignore=tests/firewall $@
tests_cleanup
-scripts/run_firewall_test.sh
-tests_cleanup
diff --git a/scripts/run_firewall_test.sh b/scripts/run_firewall_test.sh
index c30c24fc3..c3960525b 100755
--- a/scripts/run_firewall_test.sh
+++ b/scripts/run_firewall_test.sh
@@ -1,14 +1,15 @@
-set -e
+#!/usr/bin/env bash
+set -ea
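+# -e: abort on the first failing command; -a: export every variable assigned below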
+
+export DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
docker rm -f test-firewall || true
-DIR=$PWD
docker build -t admin:base .
docker build -f tests.Dockerfile -t test-firewall .
-docker run -v "$DIR/tests/skale-data/node_data":"/skale_node_data" \
- -v "$DIR/tests/skale-data":"/skale_vol" \
+docker run -v "$DIR/../tests/skale-data/node_data":"/skale_node_data" \
+ -v "$DIR/../tests/skale-data":"/skale_vol" \
-e SGX_SERVER_URL="https://127.0.0.1:1026" \
-e ENDPOINT="http://127.0.0.1:8545" \
- -e IMA_ENDPOINT="http://127.0.01:1000" \
-e DB_USER="test" \
-e DB_PASSWORD="pass" \
-e DB_ROOT_PASSWORD="root-test-pass" \
diff --git a/scripts/test_generate_config_sync_v1.py b/scripts/test_generate_config_sync_v1.py
new file mode 100644
index 000000000..686bdbb95
--- /dev/null
+++ b/scripts/test_generate_config_sync_v1.py
@@ -0,0 +1,51 @@
+import json
+import pytest
+
+from core.schains.config.generator import SChainBaseConfig
+from core.schains.config.accounts import generate_dynamic_accounts
+
+from tools.helper import read_json
+from tools.configs.schains import BASE_SCHAIN_CONFIG_FILEPATH
+
+# Run only on admin 2.0.2 or older
+# Add config with node groups to the root admin folder
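+# Fill CHAINS with the names of the chains to generate static accounts for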
+
+CHAINS = ['']
+
+
+@pytest.mark.skip(reason="test only used to generate static accounts for a sync node")
+def test_generate_config(skale):
+ for schain_name in CHAINS:
+
+ current_config = read_json(f'{schain_name}.json')
+ original_group = current_config["skaleConfig"]["sChain"]["nodeGroups"]["0"]["nodes"]
+
+ schain = skale.schains.get_by_name(schain_name)
+
+ schain_nodes_with_schains = []
+ for key, value in original_group.items():
+ schain_nodes_with_schains.append({
+ 'id': int(key),
+ 'publicKey': value[2]
+ })
+
+ base_config = SChainBaseConfig(BASE_SCHAIN_CONFIG_FILEPATH)
+
+ dynamic_accounts = generate_dynamic_accounts(
+ schain=schain,
+ schain_nodes=schain_nodes_with_schains
+ )
+
+ accounts = {
+ **base_config.config['accounts'],
+ **dynamic_accounts
+ }
+
+ with open(f'accounts/schain-{schain_name}.json', 'w') as outfile:
+ json.dump({'accounts': accounts}, outfile, indent=4)
diff --git a/sync_node.py b/sync_node.py
new file mode 100644
index 000000000..77746aad9
--- /dev/null
+++ b/sync_node.py
@@ -0,0 +1,104 @@
+# -*- coding: utf-8 -*-
+#
+# This file is part of SKALE Admin
+#
+# Copyright (C) 2022 SKALE Labs
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
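+"""Entrypoint for running a single SKALE chain in sync node mode.
+
+The chain to monitor is selected via the SCHAIN_NAME environment variable.
+"""
+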
+import os
+import time
+import logging
+from typing import Dict
+
+from skale import Skale, SkaleIma
+from skale.schain_config.ports_allocation import get_schain_base_port_on_node
+
+from core.schains.process_manager import run_pm_schain
+from core.node_config import NodeConfig
+from core.ima.schain import update_predeployed_ima
+
+from tools.logger import init_sync_logger
+from tools.configs.web3 import ENDPOINT, ABI_FILEPATH
+from tools.configs.ima import MAINNET_IMA_ABI_FILEPATH
+
+from web.models.schain import create_tables
+from web.migrations import migrate
+
+
+init_sync_logger()
+logger = logging.getLogger(__name__)
+
+SLEEP_INTERVAL = 180
+WORKER_RESTART_SLEEP_INTERVAL = 2
+
+SCHAIN_NAME = os.environ.get('SCHAIN_NAME')
+
+
+def monitor(skale, skale_ima, node_config, schain: Dict) -> None:
+ while True:
+ try:
+ run_pm_schain(skale, skale_ima, node_config, schain)
+ except Exception:
+ logger.exception('Process manager procedure failed!')
+ logger.info(
+ f'Sleeping for {SLEEP_INTERVAL}s after run_pm_schain'
+ )
+ time.sleep(SLEEP_INTERVAL)
+
+
+def worker(schain_name: str):
+ skale = Skale(ENDPOINT, ABI_FILEPATH)
+ skale_ima = SkaleIma(ENDPOINT, MAINNET_IMA_ABI_FILEPATH)
+
+ if not skale.schains_internal.is_schain_exist(schain_name):
+ logger.error(f'Provided SKALE Chain does not exist: {schain_name}')
+ exit(1)
+
+ schain = skale.schains.get_by_name(schain_name)
+ node_config = NodeConfig()
+
+ schain_nodes = skale.schains_internal.get_node_ids_for_schain(schain_name)
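+ # If no node id is configured, borrow the id of the first node in the chain group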
+ if not node_config.id:
+ node_config.id = schain_nodes[0]
+
+ node = skale.nodes.get(node_config.id)
+ if node_config.schain_base_port == -1:
+ schains_on_node = skale.schains.get_schains_for_node(node_config.id)
+ node_config.schain_base_port = get_schain_base_port_on_node(
+ schains_on_node,
+ schain_name,
+ node['port']
+ )
+
+ logger.info(f'Node {node_config.id} will be used as a current node')
+ monitor(skale, skale_ima, node_config, schain)
+
+
+def main():
+ if SCHAIN_NAME is None:
+ raise Exception('SCHAIN_NAME environment variable is required')
+ while True:
+ try:
+ create_tables()
+ migrate()
+ update_predeployed_ima()
+ worker(SCHAIN_NAME)
+ except Exception:
+ logger.exception('Sync node worker failed')
+ time.sleep(WORKER_RESTART_SLEEP_INTERVAL)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/tests/conftest.py b/tests/conftest.py
index 41685e421..8b34c172f 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -9,6 +9,7 @@
import docker
import pytest
+import yaml
from skale import SkaleManager
@@ -46,7 +47,7 @@
from core.schains.skaled_status import init_skaled_status, SkaledStatus
from core.schains.config.skale_manager_opts import SkaleManagerOpts
-from tools.configs import META_FILEPATH, SSL_CERTIFICATES_FILEPATH
+from tools.configs import CONFIG_FOLDER, ENV_TYPE, META_FILEPATH, SSL_CERTIFICATES_FILEPATH
from tools.configs.containers import CONTAINERS_FILEPATH
from tools.configs.ima import SCHAIN_IMA_ABI_FILEPATH
from tools.configs.schains import SCHAINS_DIR_PATH
@@ -65,6 +66,7 @@
generate_cert,
generate_schain_config,
get_test_rule_controller,
+ IMA_MIGRATION_TS,
init_skale_from_wallet,
init_skale_ima,
upsert_schain_record_with_config
@@ -511,6 +513,7 @@ def schain_checks(schain_config, schain_db, current_nodes, rule_controller, esta
rule_controller=rule_controller,
stream_version=CONFIG_STREAM,
current_nodes=current_nodes,
+ last_dkg_successful=True,
estate=estate,
dutils=dutils
)
@@ -617,3 +620,15 @@ def upstreams(schain_db, schain_config):
yield files
finally:
shutil.rmtree(config_folder, ignore_errors=True)
+
+
+@pytest.fixture
+def ima_migration_schedule(schain_db):
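+ # Write a temporary ima_migration_schedule.yaml for the current env type and chain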
+ name = schain_db
+ try:
+ migration_schedule_path = os.path.join(CONFIG_FOLDER, 'ima_migration_schedule.yaml')
+ with open(migration_schedule_path, 'w') as migration_schedule_file:
+ yaml.dump({ENV_TYPE: {name: IMA_MIGRATION_TS}}, migration_schedule_file)
+ yield migration_schedule_path
+ finally:
+ os.remove(migration_schedule_path)
diff --git a/tests/docker_utils_test.py b/tests/docker_utils_test.py
index 23861d741..731d7baa0 100644
--- a/tests/docker_utils_test.py
+++ b/tests/docker_utils_test.py
@@ -3,10 +3,12 @@
import docker
import pytest
-from mock import Mock
+from mock import Mock, MagicMock
+
from types import SimpleNamespace
from core.schains.runner import (
+ run_schain_container,
get_container_name,
get_image_name,
get_container_info
@@ -45,6 +47,14 @@ def mocked_dutils(dutils):
return dutils
+@pytest.fixture
+def mocked_dutils_run_container(dutils):
+ class ContainerMock:
+ id = 123 # noqa
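+ # Use a MagicMock so the tests below can inspect run_container call arguments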
+ dutils.run_container = MagicMock(return_value=ContainerMock())
+ return dutils
+
+
def check_schain_container(schain_name: str, client: DockerUtils):
assert client.is_data_volume_exists(schain_name)
@@ -102,6 +112,46 @@ def test_run_schain_container(
check_schain_container(schain_name, dutils)
+@mock.patch(
+ 'core.schains.runner.get_container_name',
+ return_value='skaled-mock'
+)
+def test_run_schain_container_sync(
+ mocked_dutils_run_container,
+ schain_config,
+ cleanup_container,
+ cert_key_pair
+):
+ schain_name = schain_config['skaleConfig']['sChain']['schainName']
+ schain_data = get_schain_contracts_data(schain_name)
+
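+ # Regular run: resource limits applied, non-historic image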
+ run_schain_container(
+ schain_data,
+ dutils=mocked_dutils_run_container,
+ )
+ assert '-historic' not in mocked_dutils_run_container.run_container.call_args[0][0]
+ assert mocked_dutils_run_container.run_container.call_args[1].get('cpu_shares')
+ assert mocked_dutils_run_container.run_container.call_args[1].get('mem_limit')
+
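+ # Sync node run with historic state: historic image, no cpu/mem limits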
+ run_schain_container(
+ schain_data,
+ dutils=mocked_dutils_run_container,
+ sync_node=True,
+ historic_state=True
+ )
+ assert '-historic' in mocked_dutils_run_container.run_container.call_args[0][0]
+ assert not mocked_dutils_run_container.run_container.call_args[1].get('cpu_shares')
+ assert not mocked_dutils_run_container.run_container.call_args[1].get('mem_limit')
+
+
+def test_get_image_name_sync():
+ image_name = get_image_name(image_type=SCHAIN_CONTAINER)
+ assert '-historic' not in image_name
+
+ image_name = get_image_name(image_type=SCHAIN_CONTAINER, historic_state=True)
+ assert '-historic' in image_name
+
+
@mock.patch(
'core.schains.runner.get_image_name',
return_value='skaled-mock'
@@ -138,9 +188,9 @@ def test_not_existed_docker_objects(dutils):
def test_restart_all_schains(mocked_dutils):
schain_names = ['test1', 'test2', 'test3']
- schain_image = get_image_name(SCHAIN_CONTAINER)
+ schain_image = get_image_name(image_type=SCHAIN_CONTAINER)
cont_names = [
- get_container_name(SCHAIN_CONTAINER, name)
+ get_container_name(image_type=SCHAIN_CONTAINER, schain_name=name)
for name in schain_names
]
start_time = {}
diff --git a/tests/filebeat_test.py b/tests/filebeat_test.py
index 1e9ad79ae..97bb19da6 100644
--- a/tests/filebeat_test.py
+++ b/tests/filebeat_test.py
@@ -1,7 +1,7 @@
import os
import mock
-from core.filebeat import filebeat_config_processed
-from tools.configs.filebeat import NODE_DATA_PATH
+from core.monitoring import filebeat_config_processed
+from tools.configs.monitoring import NODE_DATA_PATH
PROCESSED_FILEBEAT_CONFIG_PATH = os.path.join(NODE_DATA_PATH, 'filebeat_processed.yml')
@@ -9,5 +9,5 @@
def test_filebeat_config_processed():
assert not filebeat_config_processed()
- with mock.patch('core.filebeat.FILEBEAT_CONFIG_PATH', PROCESSED_FILEBEAT_CONFIG_PATH):
+ with mock.patch('core.monitoring.FILEBEAT_CONFIG_PATH', PROCESSED_FILEBEAT_CONFIG_PATH):
assert filebeat_config_processed()
diff --git a/tests/firewall/rule_controller_test.py b/tests/firewall/rule_controller_test.py
index e0aa26a15..d4f04d598 100644
--- a/tests/firewall/rule_controller_test.py
+++ b/tests/firewall/rule_controller_test.py
@@ -150,6 +150,50 @@ def test_schain_rule_controller_configure():
own_ip = '1.1.1.1'
node_ips = ['1.1.1.1', '2.2.2.2', '3.3.3.3', '4.4.4.4']
base_port = 10000
+
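+ # Configuring without node_ips leaves the controller uninitialized for public_ports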
+ src.configure(base_port=base_port)
+ with pytest.raises(NotInitializedError):
+ src.public_ports()
+
+ src.configure(base_port=base_port, node_ips=node_ips)
+ assert list(src.public_ports) == [10003, 10008, 10002, 10007, 10009]
+
+ expected_rules = {
+ SChainRule(port=10000, first_ip='1.1.1.1', last_ip=None),
+ SChainRule(port=10000, first_ip='2.2.2.2', last_ip=None),
+ SChainRule(port=10000, first_ip='3.3.3.3', last_ip=None),
+ SChainRule(port=10000, first_ip='4.4.4.4', last_ip=None),
+ SChainRule(port=10001, first_ip='1.1.1.1', last_ip=None),
+ SChainRule(port=10001, first_ip='2.2.2.2', last_ip=None),
+ SChainRule(port=10001, first_ip='3.3.3.3', last_ip=None),
+ SChainRule(port=10001, first_ip='4.4.4.4', last_ip=None),
+ SChainRule(port=10002, first_ip=None, last_ip=None),
+ SChainRule(port=10003, first_ip=None, last_ip=None),
+ SChainRule(port=10004, first_ip='1.1.1.1', last_ip=None),
+ SChainRule(port=10004, first_ip='2.2.2.2', last_ip=None),
+ SChainRule(port=10004, first_ip='3.3.3.3', last_ip=None),
+ SChainRule(port=10004, first_ip='4.4.4.4', last_ip=None),
+ SChainRule(port=10005, first_ip='1.1.1.1', last_ip=None),
+ SChainRule(port=10005, first_ip='2.2.2.2', last_ip=None),
+ SChainRule(port=10005, first_ip='3.3.3.3', last_ip=None),
+ SChainRule(port=10005, first_ip='4.4.4.4', last_ip=None),
+ SChainRule(port=10007, first_ip=None, last_ip=None),
+ SChainRule(port=10008, first_ip=None, last_ip=None),
+ SChainRule(port=10009, first_ip=None, last_ip=None),
+ SChainRule(port=10010, first_ip='1.1.1.1', last_ip=None),
+ SChainRule(port=10010, first_ip='2.2.2.2', last_ip=None),
+ SChainRule(port=10010, first_ip='3.3.3.3', last_ip=None),
+ SChainRule(port=10010, first_ip='4.4.4.4', last_ip=None)
+ }
+ src.configure(base_port=base_port, node_ips=node_ips)
+
+ assert not src.is_rules_synced()
+ assert list(src.expected_rules()) == list(sorted(expected_rules))
+ src.sync()
+ assert src.is_rules_synced()
+ assert list(src.expected_rules()) == list(sorted(expected_rules))
+ assert list(src.actual_rules()) == list(sorted(expected_rules))
+
expected_rules = {
SChainRule(port=10000, first_ip='2.2.2.2', last_ip=None),
SChainRule(port=10000, first_ip='3.3.3.3', last_ip=None),
@@ -173,6 +217,7 @@ def test_schain_rule_controller_configure():
SChainRule(port=10010, first_ip='4.4.4.4', last_ip=None)
}
src.configure(base_port=base_port, own_ip=own_ip, node_ips=node_ips)
+
assert not src.is_rules_synced()
assert list(src.expected_rules()) == list(sorted(expected_rules))
src.sync()
diff --git a/tests/helper_test.py b/tests/helper_test.py
index 762225232..538690185 100644
--- a/tests/helper_test.py
+++ b/tests/helper_test.py
@@ -1,4 +1,4 @@
-from tools.helper import is_address_contract
+from tools.helper import is_address_contract, no_hyphens
from tools.configs.web3 import ZERO_ADDRESS
@@ -6,3 +6,10 @@ def test_is_address_contract(skale):
assert not is_address_contract(skale.web3, ZERO_ADDRESS)
assert is_address_contract(skale.web3, skale.manager.address)
assert is_address_contract(skale.web3, skale.nodes.address)
+
+
+def test_no_hyphen():
+ assert no_hyphens('too') == 'too'
+ assert no_hyphens('too-boo') == 'too_boo'
+ assert no_hyphens('too-boo_goo') == 'too_boo_goo'
+ assert no_hyphens('too_goo') == 'too_goo'
diff --git a/tests/migrations_test.py b/tests/migrations_test.py
index f51992d65..c744e6250 100644
--- a/tests/migrations_test.py
+++ b/tests/migrations_test.py
@@ -1,4 +1,5 @@
import os
+from datetime import datetime
import pytest
from peewee import CharField, Model, SqliteDatabase
@@ -14,7 +15,8 @@
add_monitor_last_seen_field,
add_monitor_id_field,
add_config_version_field,
- add_restart_count_field
+ add_restart_count_field,
+ add_ssl_change_date_field
)
@@ -110,3 +112,9 @@ def test_add_failed_rpc_count_field(upserted_db, migrator, model):
add_failed_rpc_count_field(upserted_db, migrator)
for r in model.select().execute():
r.restart_count == 0
+
+
+def test_add_ssl_change_date_field(upserted_db, migrator, model):
+ add_ssl_change_date_field(upserted_db, migrator)
+ for r in model.select().execute():
+ assert r.ssl_change_date < datetime.now()
diff --git a/tests/monitoring_test.py b/tests/monitoring_test.py
new file mode 100644
index 000000000..c5eb74f13
--- /dev/null
+++ b/tests/monitoring_test.py
@@ -0,0 +1,67 @@
+import os
+
+import pytest
+
+from core.monitoring import TelegrafNotConfiguredError, update_telegraf_service
+from tools.configs.monitoring import (
+ TELEGRAF_TEMPLATE_PATH,
+ TELEGRAF_CONFIG_PATH
+)
+
+
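+# Minimal telegraf template whose placeholders are rendered by update_telegraf_service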
+CONFIG_TEMPLATE = """
+[agent]
+ interval = "60s"
+ hostname = "{{ ip }}"
+ omit_hostname = false
+
+[global_tags]
+ node_id = "{{ node_id }}"
+
+[[outputs.db]]
+ alias = "db"
+ urls = ["{{ url }}"]
+
+"""
+
+
+@pytest.fixture
+def cleanup_container(dutils):
+ try:
+ yield
+ finally:
+ dutils.safe_rm('skale_telegraf')
+
+
+@pytest.fixture
+def telegraf_template():
+ try:
+ with open(TELEGRAF_TEMPLATE_PATH, 'w') as template:
+ template.write(CONFIG_TEMPLATE)
+ yield TELEGRAF_TEMPLATE_PATH
+ finally:
+ os.remove(TELEGRAF_TEMPLATE_PATH)
+ os.remove(TELEGRAF_CONFIG_PATH)
+
+
+def test_update_telegraf_service(telegraf_template, cleanup_container, dutils):
+ node_id = 1
+ node_ip = '1.1.1.1'
+ with pytest.raises(TelegrafNotConfiguredError):
+ update_telegraf_service(
+ node_id=node_id,
+ node_ip='',
+ url='http://127.0.0.1:1231',
+ dutils=dutils
+ )
+
+ update_telegraf_service(
+ node_ip,
+ node_id,
+ url='http://127.0.0.1:1231',
+ dutils=dutils
+ )
+ with open(TELEGRAF_CONFIG_PATH) as config_file:
+ config = config_file.read()
+ assert config == '\n[agent]\n interval = "60s"\n hostname = "1.1.1.1"\n omit_hostname = false\n\n[global_tags]\n node_id = "1"\n\n[[outputs.db]]\n alias = "db"\n urls = ["http://127.0.0.1:1231"]\n' # noqa
+ assert dutils.is_container_running('skale_telegraf')
diff --git a/tests/node_test.py b/tests/node_test.py
index d664b2a86..afe16b0b5 100644
--- a/tests/node_test.py
+++ b/tests/node_test.py
@@ -35,7 +35,7 @@ def node(node_skales, skale, nodes):
node_data = skale.nodes.get(config.id)
config.name = node_data['name']
config.ip = ip_from_bytes(node_data['ip'])
- yield Node(node_skales[0], config)
+ return Node(node_skales[0], config)
@pytest.fixture
@@ -94,9 +94,10 @@ def test_create_insufficient_funds(unregistered_node):
def test_register_info(unregistered_node):
unregistered_node.config.id = None
ip, public_ip, port, name = generate_random_node_data()
+ assert unregistered_node.config.schain_base_port == -1
# Register new node and check that it successfully created on contracts
- with mock.patch('core.node.update_filebeat_service'):
+ with mock.patch('core.node.update_monitoring_services'):
res = unregistered_node.register(
ip,
public_ip,
@@ -104,6 +105,7 @@ def test_register_info(unregistered_node):
name,
domain_name=DEFAULT_DOMAIN_NAME
)
+ assert unregistered_node.config.schain_base_port == -1
assert res['status'] == 'ok'
res_data = res.get('data')
diff --git a/tests/routes/health_test.py b/tests/routes/health_test.py
index da2758371..0fe254ba2 100644
--- a/tests/routes/health_test.py
+++ b/tests/routes/health_test.py
@@ -70,6 +70,10 @@ def test_containers(skale_bp, dutils):
)
}
assert data == expected
+ for container_info in data['payload']:
+ field_map = {'cpu_shares': 0, 'mem_limit': 0, 'swap_limit': 0, 'swappiness': None}
+ for field, value in field_map.items():
+ assert container_info[field] == value
def test_containers_all(skale_bp, dutils, schain_db, cleanup_schain_containers):
@@ -85,6 +89,10 @@ def test_containers_all(skale_bp, dutils, schain_db, cleanup_schain_containers):
)
}
assert data == expected
+ for container_info in data['payload']:
+ field_map = {'cpu_shares': 0, 'mem_limit': 0, 'swap_limit': 0, 'swappiness': None}
+ for field, value in field_map.items():
+ assert container_info[field] == value
def test_schains_checks(skale_bp, skale, schain_on_contracts, schain_db, dutils):
diff --git a/tests/schain_record_test.py b/tests/schain_record_test.py
new file mode 100644
index 000000000..2ef4503b0
--- /dev/null
+++ b/tests/schain_record_test.py
@@ -0,0 +1,15 @@
+from web.models.schain import SChainRecord
+from core.schains.ssl import update_ssl_change_date, ssl_reload_needed
+
+
+def test_ssl_change_date_matches_no_certs(schain_db, ssl_folder):
+ schain_record = SChainRecord.get_by_name(schain_db)
+ assert not ssl_reload_needed(schain_record)
+
+
+def test_ssl_change_date_matches(schain_db, cert_key_pair):
+ schain_record = SChainRecord.get_by_name(schain_db)
+
+ assert ssl_reload_needed(schain_record)
+ update_ssl_change_date(schain_record)
+ assert not ssl_reload_needed(schain_record)
diff --git a/tests/schains/checks_test.py b/tests/schains/checks_test.py
index 0c09d7df4..2e86f4dca 100644
--- a/tests/schains/checks_test.py
+++ b/tests/schains/checks_test.py
@@ -96,6 +96,7 @@ def sample_false_checks(schain_config, schain_db, rule_controller, current_nodes
schain_record=schain_record,
rule_controller=rule_controller,
stream_version=CONFIG_STREAM,
+ last_dkg_successful=True,
current_nodes=current_nodes,
estate=estate,
dutils=dutils
@@ -120,6 +121,7 @@ def rules_unsynced_checks(
rule_controller=uninited_rule_controller,
stream_version=CONFIG_STREAM,
current_nodes=current_nodes,
+ last_dkg_successful=True,
estate=estate,
dutils=dutils
)
@@ -202,12 +204,11 @@ def test_exit_code_ok_check(schain_checks, sample_false_checks):
def test_ima_container_check(schain_checks, cleanup_ima_containers, dutils):
- dutils.is_container_running = lambda *args: True
ts = int(time.time())
mts = ts + 3600
name = schain_checks.name
schain = get_schain_contracts_data(name)
- image = get_image_name(type=IMA_CONTAINER)
+ image = get_image_name(image_type=IMA_CONTAINER)
# new_image = get_image_name(type=IMA_CONTAINER, new=True)
# if dutils.pulled(new_image):
@@ -230,8 +231,8 @@ def test_ima_container_check(schain_checks, cleanup_ima_containers, dutils):
mts = ts - 3600
with mock.patch('core.schains.checks.get_ima_migration_ts', return_value=mts):
assert not schain_checks.ima_container.status
- image = get_image_name(type=IMA_CONTAINER, new=True)
- run_ima_container(schain, mainnet_chain_id=1,
+ image = get_image_name(image_type=IMA_CONTAINER, new=True)
+ run_ima_container(schain, mainnet_chain_id=1, time_frame=900,
image=image, dutils=dutils)
assert schain_checks.ima_container.status
@@ -289,6 +290,7 @@ def test_init_checks(skale, schain_db, current_nodes, uninited_rule_controller,
rule_controller=uninited_rule_controller,
stream_version=CONFIG_STREAM,
current_nodes=current_nodes,
+ last_dkg_successful=True,
estate=estate,
dutils=dutils
)
@@ -317,6 +319,7 @@ def test_exit_code(skale, rule_controller, schain_db, current_nodes, estate, dut
rule_controller=rule_controller,
stream_version=CONFIG_STREAM,
current_nodes=current_nodes,
+ last_dkg_successful=True,
estate=estate,
dutils=dutils
)
@@ -336,6 +339,7 @@ def test_process(skale, rule_controller, schain_db, current_nodes, estate, dutil
rule_controller=rule_controller,
stream_version=CONFIG_STREAM,
current_nodes=current_nodes,
+ last_dkg_successful=True,
estate=estate,
dutils=dutils
)
@@ -360,6 +364,7 @@ def test_get_all(schain_config, rule_controller, dutils, current_nodes, schain_d
rule_controller=rule_controller,
stream_version=CONFIG_STREAM,
current_nodes=current_nodes,
+ last_dkg_successful=True,
estate=estate,
dutils=dutils
)
@@ -382,6 +387,7 @@ def test_get_all(schain_config, rule_controller, dutils, current_nodes, schain_d
rule_controller=rule_controller,
stream_version=CONFIG_STREAM,
current_nodes=current_nodes,
+ last_dkg_successful=True,
estate=estate,
dutils=dutils
)
@@ -409,6 +415,7 @@ def test_get_all_with_save(node_config, rule_controller, current_nodes, dutils,
rule_controller=rule_controller,
stream_version=CONFIG_STREAM,
current_nodes=current_nodes,
+ last_dkg_successful=True,
estate=estate,
dutils=dutils
)
@@ -433,9 +440,11 @@ def test_config_updated(skale, rule_controller, schain_db, current_nodes, estate
rule_controller=rule_controller,
stream_version=CONFIG_STREAM,
current_nodes=current_nodes,
+ last_dkg_successful=False,
estate=estate,
dutils=dutils
)
+ assert checks.last_dkg_successful.status is False
assert checks.config_updated
upstream_path = UpstreamConfigFilename(
@@ -454,6 +463,7 @@ def test_config_updated(skale, rule_controller, schain_db, current_nodes, estate
rule_controller=rule_controller,
stream_version=CONFIG_STREAM,
current_nodes=current_nodes,
+ last_dkg_successful=True,
estate=estate,
dutils=dutils
)
@@ -467,7 +477,9 @@ def test_config_updated(skale, rule_controller, schain_db, current_nodes, estate
rule_controller=rule_controller,
stream_version=CONFIG_STREAM,
current_nodes=current_nodes,
+ last_dkg_successful=True,
estate=estate,
dutils=dutils
)
+ assert checks.last_dkg_successful.status is True
assert not checks.config_updated
diff --git a/tests/schains/cleaner_test.py b/tests/schains/cleaner_test.py
index e8319474c..4db2e9f45 100644
--- a/tests/schains/cleaner_test.py
+++ b/tests/schains/cleaner_test.py
@@ -255,6 +255,7 @@ def test_cleanup_schain(
schain_name,
current_nodes=current_nodes,
sync_agent_ranges=[],
+ last_dkg_successful=True,
rotation_id=0,
estate=estate,
dutils=dutils
diff --git a/tests/schains/cmd_test.py b/tests/schains/cmd_test.py
index a87a25aaf..0ba66b269 100644
--- a/tests/schains/cmd_test.py
+++ b/tests/schains/cmd_test.py
@@ -7,7 +7,7 @@
from tools.configs.containers import SHARED_SPACE_CONTAINER_PATH
from tools.configs import SGX_SERVER_URL
-from tools.configs.ima import IMA_ENDPOINT
+from tools.configs.web3 import ENDPOINT
def test_get_schain_container_cmd(schain_config, cert_key_pair):
@@ -17,10 +17,9 @@ def test_get_schain_container_cmd(schain_config, cert_key_pair):
ssl_key_path, ssl_cert_path = get_ssl_filepath()
expected_opts = (
f'--config {config_filepath} -d /data_dir --ipcpath /data_dir --http-port 10003 '
- f'--https-port 10008 --ws-port 10002 --wss-port 10007 --sgx-url {SGX_SERVER_URL} '
- f'--shared-space-path {SHARED_SPACE_CONTAINER_PATH}/data '
- f'--main-net-url {IMA_ENDPOINT} -v 3 '
- f'--web3-trace --enable-debug-behavior-apis '
+ f'--https-port 10008 --ws-port 10002 --wss-port 10007 --main-net-url {ENDPOINT} '
+ f'--sgx-url {SGX_SERVER_URL} --shared-space-path {SHARED_SPACE_CONTAINER_PATH}/data '
+ '-v 3 --web3-trace --enable-debug-behavior-apis '
f'--aa no --ssl-key {ssl_key_path} --ssl-cert {ssl_cert_path}'
)
assert container_opts == expected_opts
@@ -28,9 +27,9 @@ def test_get_schain_container_cmd(schain_config, cert_key_pair):
container_opts = get_schain_container_cmd(schain_name, enable_ssl=False)
expected_opts = (
f'--config {config_filepath} -d /data_dir --ipcpath /data_dir --http-port 10003 '
- f'--https-port 10008 --ws-port 10002 --wss-port 10007 --sgx-url {SGX_SERVER_URL} '
- f'--shared-space-path {SHARED_SPACE_CONTAINER_PATH}/data '
- f'--main-net-url {IMA_ENDPOINT} -v 3 --web3-trace '
+ f'--https-port 10008 --ws-port 10002 --wss-port 10007 --main-net-url {ENDPOINT} '
+ f'--sgx-url {SGX_SERVER_URL} '
+ f'--shared-space-path {SHARED_SPACE_CONTAINER_PATH}/data -v 3 --web3-trace '
f'--enable-debug-behavior-apis --aa no'
)
assert container_opts == expected_opts
@@ -39,9 +38,9 @@ def test_get_schain_container_cmd(schain_config, cert_key_pair):
schain_name, snapshot_from='1.1.1.1')
expected_opts = (
f'--config {config_filepath} -d /data_dir --ipcpath /data_dir --http-port 10003 '
- f'--https-port 10008 --ws-port 10002 --wss-port 10007 --sgx-url {SGX_SERVER_URL} '
- f'--shared-space-path {SHARED_SPACE_CONTAINER_PATH}/data '
- f'--main-net-url {IMA_ENDPOINT} -v 3 '
+ f'--https-port 10008 --ws-port 10002 --wss-port 10007 --main-net-url {ENDPOINT} '
+ f'--sgx-url {SGX_SERVER_URL} '
+ f'--shared-space-path {SHARED_SPACE_CONTAINER_PATH}/data -v 3 '
f'--web3-trace --enable-debug-behavior-apis '
f'--aa no --ssl-key {ssl_key_path} --ssl-cert {ssl_cert_path} '
'--no-snapshot-majority 1.1.1.1'
@@ -51,9 +50,9 @@ def test_get_schain_container_cmd(schain_config, cert_key_pair):
container_opts = get_schain_container_cmd(schain_name, snapshot_from='')
expected_opts = (
f'--config {config_filepath} -d /data_dir --ipcpath /data_dir --http-port 10003 '
- f'--https-port 10008 --ws-port 10002 --wss-port 10007 --sgx-url {SGX_SERVER_URL} '
- f'--shared-space-path {SHARED_SPACE_CONTAINER_PATH}/data '
- f'--main-net-url {IMA_ENDPOINT} -v 3 '
+ f'--https-port 10008 --ws-port 10002 --wss-port 10007 --main-net-url {ENDPOINT} '
+ f'--sgx-url {SGX_SERVER_URL} '
+ f'--shared-space-path {SHARED_SPACE_CONTAINER_PATH}/data -v 3 '
f'--web3-trace --enable-debug-behavior-apis '
f'--aa no --ssl-key {ssl_key_path} --ssl-cert {ssl_cert_path}'
)
@@ -70,3 +69,16 @@ def test_get_schain_container_sync_opts():
assert sync_opts == [
'--download-snapshot readfromconfig'
]
+
+
+def test_get_schain_container_cmd_sync_node(schain_config, cert_key_pair):
+ schain_name = schain_config['skaleConfig']['sChain']['schainName']
+ container_opts = get_schain_container_cmd(schain_name, enable_ssl=False, sync_node=True)
+ config_filepath = get_skaled_container_config_path(schain_name)
+
+ expected_opts = (
+ f'--config {config_filepath} -d /data_dir --ipcpath /data_dir --http-port 10003 '
+ f'--https-port 10008 --ws-port 10002 --wss-port 10007 --main-net-url {ENDPOINT} '
+ f'-v 3 --web3-trace --enable-debug-behavior-apis --aa no'
+ )
+ assert container_opts == expected_opts
diff --git a/tests/schains/config/generator_test.py b/tests/schains/config/generator_test.py
index 71653b491..570f766c4 100644
--- a/tests/schains/config/generator_test.py
+++ b/tests/schains/config/generator_test.py
@@ -3,14 +3,6 @@
from pathlib import Path
import pytest
-
-from core.schains.config.generator import (
- generate_schain_config_with_skale, generate_schain_config, get_schain_originator
-)
-from core.schains.config.predeployed import PROXY_ADMIN_PREDEPLOYED_ADDRESS
-from tools.configs.schains import SCHAINS_DIR_PATH
-from core.schains.config.helper import get_schain_id
-
from etherbase_predeployed import ETHERBASE_ADDRESS, ETHERBASE_IMPLEMENTATION_ADDRESS
from marionette_predeployed import MARIONETTE_ADDRESS, MARIONETTE_IMPLEMENTATION_ADDRESS
from filestorage_predeployed import FILESTORAGE_ADDRESS, FILESTORAGE_IMPLEMENTATION_ADDRESS
@@ -21,15 +13,24 @@
from multisigwallet_predeployed import MULTISIGWALLET_ADDRESS
from ima_predeployed.generator import MESSAGE_PROXY_FOR_SCHAIN_ADDRESS
+from core.schains.config.generator import (
+ generate_schain_config_with_skale, generate_schain_config, get_schain_originator
+)
+from core.schains.config.helper import get_schain_id
+from core.schains.config.predeployed import PROXY_ADMIN_PREDEPLOYED_ADDRESS
+from tools.configs.schains import SCHAINS_DIR_PATH
+from tools.node_options import NodeOptions
+
NODE_ID = 1
ECDSA_KEY_NAME = 'TEST:KEY:NAME'
+COMMON_BLS_PUBLIC_KEY = [123, 456, 789, 123]
SECRET_KEY = {
"key_share_name": "BLS_KEY:SCHAIN_ID:1:NODE_ID:0:DKG_ID:0",
"t": 3,
"n": 4,
- "common_public_key": [123, 456, 789, 123],
+ "common_public_key": COMMON_BLS_PUBLIC_KEY,
"public_key": [
"123",
"456",
@@ -45,15 +46,6 @@
TEST_ORIGINATOR_ADDRESS = '0x0B5e3eBB74eE281A24DDa3B1A4e70692c15EAC34'
TEST_MAINNET_OWNER_ADDRESS = '0x30E1C96277735B03E59B3098204fd04FD0e78a46'
-TEST_SCHAIN_NODE_WITH_SCHAINS = [{
- 'name': 'test',
- 'ip': b'\x01\x02\x03\x04',
- 'publicIP': b'\x01\x02\x03\x04',
- 'publicKey': '0x0B5e3eBB74eE281A24DDa3B1A4e70692c15EAC34',
- 'port': 10000,
- 'id': 1,
- 'schains': [{'name': 'test_schain'}]
-}]
TEST_NODE = {'id': 1, 'name': 'test', 'publicKey': '0x5556', 'port': 10000}
@@ -75,6 +67,27 @@
'multitransactionMode': True
}
+SCHAIN_WITH_STATIC_ACCOUNTS = {
+ 'name': 'static_chain',
+ 'partOfNode': 0,
+ 'generation': 1,
+ 'mainnetOwner': TEST_MAINNET_OWNER_ADDRESS,
+ 'originator': TEST_ORIGINATOR_ADDRESS,
+ 'multitransactionMode': True
+}
+
+
+def get_schain_node_with_schains(schain_name: str) -> list:
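+ # Build a one-node group for the given chain (replaces the removed module-level constant)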
+ return [{
+ 'name': 'test',
+ 'ip': b'\x01\x02\x03\x04',
+ 'publicIP': b'\x01\x02\x03\x04',
+ 'publicKey': '0x0B5e3eBB74eE281A24DDa3B1A4e70692c15EAC34',
+ 'port': 10000,
+ 'id': 1,
+ 'schains': [{'name': schain_name}]
+ }]
+
@pytest.fixture
def schain_secret_key_file(schain_on_contracts, predeployed_ima):
@@ -127,12 +140,16 @@ def check_node_bls_keys(info, index):
def check_node_info(node_id, info):
- keys = ['nodeID', 'nodeName', 'basePort', 'httpRpcPort', 'httpsRpcPort',
- 'wsRpcPort', 'wssRpcPort', 'bindIP', 'logLevel', 'logLevelConfig',
- 'imaMessageProxySChain', 'imaMessageProxyMainNet',
- 'ecdsaKeyName', 'wallets', 'minCacheSize',
- 'maxCacheSize', 'collectionQueueSize', 'collectionDuration',
- 'transactionQueueSize', 'maxOpenLeveldbFiles']
+ keys = [
+ 'nodeID', 'nodeName', 'basePort', 'httpRpcPort', 'httpsRpcPort',
+ 'wsRpcPort', 'wssRpcPort', 'bindIP', 'logLevel', 'logLevelConfig',
+ 'imaMessageProxySChain', 'imaMessageProxyMainNet',
+ 'ecdsaKeyName', 'wallets', 'minCacheSize',
+ 'maxCacheSize', 'collectionQueueSize', 'collectionDuration',
+ 'transactionQueueSize', 'maxOpenLeveldbFiles', 'info-acceptors', 'imaMonitoringPort',
+ 'skale-manager', 'syncNode', 'pg-threads', 'pg-threads-limit'
+ ]
+
check_keys(info, keys)
assert info['nodeID'] == node_id
check_node_ports(info)
@@ -170,7 +187,7 @@ def check_schain_info(node_ids, schain_info):
def check_config(node_id, all_node_ids, config):
check_keys(
config,
- ['sealEngine', 'params', 'genesis', 'accounts', 'skaleConfig']
+ ['sealEngine', 'params', 'unddos', 'genesis', 'accounts', 'skaleConfig']
)
assert config['params']['skaleDisableChainIdCheck'] is True
check_node_info(node_id, config['skaleConfig']['nodeInfo'])
@@ -179,19 +196,22 @@ def check_config(node_id, all_node_ids, config):
def test_generate_schain_config_with_skale(
skale,
+ node_config,
schain_on_contracts,
schain_secret_key_file
):
schain_name = schain_on_contracts
node_ids = skale.schains_internal.get_node_ids_for_schain(schain_name)
current_node_id = node_ids[0]
+ node_config.id = current_node_id
schain_config = generate_schain_config_with_skale(
skale=skale,
schain_name=schain_name,
- node_id=current_node_id,
+ node_config=node_config,
rotation_data={'rotation_id': 0, 'leaving_node': 1},
ecdsa_key_name=ECDSA_KEY_NAME,
- generation=0
+ generation=0,
+ node_options=NodeOptions()
)
check_config(current_node_id, node_ids, schain_config.to_dict())
@@ -208,7 +228,6 @@ def test_generate_schain_config_gen0(schain_secret_key_file_default_chain, skale
node_id, generation, rotation_id = 1, 0, 0
ecdsa_key_name = 'test'
- schains_on_node = [{'name': 'test_schain'}]
node_groups = {}
schain_config = generate_schain_config(
@@ -216,13 +235,14 @@ def test_generate_schain_config_gen0(schain_secret_key_file_default_chain, skale
node=TEST_NODE,
node_id=node_id,
ecdsa_key_name=ecdsa_key_name,
- schains_on_node=schains_on_node,
rotation_id=rotation_id,
- schain_nodes_with_schains=TEST_SCHAIN_NODE_WITH_SCHAINS,
+ schain_nodes_with_schains=get_schain_node_with_schains('test_schain'),
node_groups=node_groups,
generation=generation,
is_owner_contract=False,
- skale_manager_opts=skale_manager_opts
+ skale_manager_opts=skale_manager_opts,
+ common_bls_public_keys=COMMON_BLS_PUBLIC_KEY,
+ schain_base_port=10000
)
config = schain_config.to_dict()
@@ -233,7 +253,6 @@ def test_generate_schain_config_gen0(schain_secret_key_file_default_chain, skale
def test_generate_schain_config_gen1(schain_secret_key_file_default_chain, skale_manager_opts):
node_id, generation, rotation_id = 1, 1, 0
ecdsa_key_name = 'test'
- schains_on_node = [{'name': 'test_schain'}]
node_groups = {}
schain_config = generate_schain_config(
@@ -241,13 +260,14 @@ def test_generate_schain_config_gen1(schain_secret_key_file_default_chain, skale
node=TEST_NODE,
node_id=node_id,
ecdsa_key_name=ecdsa_key_name,
- schains_on_node=schains_on_node,
rotation_id=rotation_id,
- schain_nodes_with_schains=TEST_SCHAIN_NODE_WITH_SCHAINS,
+ schain_nodes_with_schains=get_schain_node_with_schains('test_schain'),
node_groups=node_groups,
generation=generation,
is_owner_contract=True,
- skale_manager_opts=skale_manager_opts
+ skale_manager_opts=skale_manager_opts,
+ common_bls_public_keys=COMMON_BLS_PUBLIC_KEY,
+ schain_base_port=10000
)
config = schain_config.to_dict()
@@ -279,7 +299,6 @@ def test_generate_schain_config_gen1_pk_owner(
):
node_id, generation, rotation_id = 1, 1, 0
ecdsa_key_name = 'test'
- schains_on_node = [{'name': 'test_schain'}]
node_groups = {}
schain_config = generate_schain_config(
@@ -287,13 +306,14 @@ def test_generate_schain_config_gen1_pk_owner(
node=TEST_NODE,
node_id=node_id,
ecdsa_key_name=ecdsa_key_name,
- schains_on_node=schains_on_node,
rotation_id=rotation_id,
- schain_nodes_with_schains=TEST_SCHAIN_NODE_WITH_SCHAINS,
+ schain_nodes_with_schains=get_schain_node_with_schains('test_schain'),
node_groups=node_groups,
generation=generation,
is_owner_contract=False,
- skale_manager_opts=skale_manager_opts
+ skale_manager_opts=skale_manager_opts,
+ common_bls_public_keys=COMMON_BLS_PUBLIC_KEY,
+ schain_base_port=10000
)
config = schain_config.to_dict()
@@ -307,7 +327,6 @@ def test_generate_schain_config_gen2_schain_id(
):
node_id, generation, rotation_id = 1, 2, 0
ecdsa_key_name = 'test'
- schains_on_node = [{'name': 'test_schain'}]
node_groups = {}
schain_config = generate_schain_config(
@@ -315,13 +334,14 @@ def test_generate_schain_config_gen2_schain_id(
node=TEST_NODE,
node_id=node_id,
ecdsa_key_name=ecdsa_key_name,
- schains_on_node=schains_on_node,
rotation_id=rotation_id,
- schain_nodes_with_schains=TEST_SCHAIN_NODE_WITH_SCHAINS,
+ schain_nodes_with_schains=get_schain_node_with_schains('test_schain'),
node_groups=node_groups,
generation=generation,
is_owner_contract=False,
- skale_manager_opts=skale_manager_opts
+ skale_manager_opts=skale_manager_opts,
+ common_bls_public_keys=COMMON_BLS_PUBLIC_KEY,
+ schain_base_port=10000
)
config = schain_config.to_dict()
assert config['skaleConfig']['sChain']['schainID'] == 2755779573749746
@@ -337,13 +357,14 @@ def test_generate_schain_config_gen1_schain_id(
node=TEST_NODE,
node_id=node_id,
ecdsa_key_name='test',
- schains_on_node=[{'name': 'test_schain'}],
rotation_id=rotation_id,
- schain_nodes_with_schains=TEST_SCHAIN_NODE_WITH_SCHAINS,
+ schain_nodes_with_schains=get_schain_node_with_schains('test_schain'),
node_groups={},
generation=generation,
is_owner_contract=False,
- skale_manager_opts=skale_manager_opts
+ skale_manager_opts=skale_manager_opts,
+ common_bls_public_keys=COMMON_BLS_PUBLIC_KEY,
+ schain_base_port=10000
)
config = schain_config.to_dict()
assert config['skaleConfig']['sChain']['schainID'] == 1
@@ -359,13 +380,14 @@ def test_generate_schain_config_gen0_schain_id(
node=TEST_NODE,
node_id=node_id,
ecdsa_key_name='test',
- schains_on_node=[{'name': 'test_schain'}],
rotation_id=rotation_id,
- schain_nodes_with_schains=TEST_SCHAIN_NODE_WITH_SCHAINS,
+ schain_nodes_with_schains=get_schain_node_with_schains('test_schain'),
node_groups={},
generation=generation,
is_owner_contract=False,
- skale_manager_opts=skale_manager_opts
+ skale_manager_opts=skale_manager_opts,
+ common_bls_public_keys=COMMON_BLS_PUBLIC_KEY,
+ schain_base_port=10000
)
config = schain_config.to_dict()
assert config['skaleConfig']['sChain']['schainID'] == 1
@@ -374,15 +396,17 @@ def test_generate_schain_config_gen0_schain_id(
def test_generate_schain_config_with_skale_gen2(
skale,
schain_on_contracts,
- schain_secret_key_file
+ schain_secret_key_file,
+ node_config
):
schain_name = schain_on_contracts
node_ids = skale.schains_internal.get_node_ids_for_schain(schain_name)
current_node_id = node_ids[0]
+ node_config.id = current_node_id
schain_config = generate_schain_config_with_skale(
skale=skale,
schain_name=schain_name,
- node_id=current_node_id,
+ node_config=node_config,
rotation_data={'rotation_id': 0, 'leaving_node': 1},
ecdsa_key_name=ECDSA_KEY_NAME,
generation=2
@@ -398,3 +422,181 @@ def test_get_schain_originator(predeployed_ima):
originator = get_schain_originator(SCHAIN_WITH_ORIGINATOR)
assert originator == TEST_ORIGINATOR_ADDRESS
+
+
+def test_generate_sync_node_config(
+ schain_secret_key_file_default_chain,
+ skale_manager_opts
+):
+ node_id, generation, rotation_id = 1, 1, 0
+ ecdsa_key_name = 'test'
+ node_groups = {}
+
+ schain_config = generate_schain_config(
+ schain=SCHAIN_WITHOUT_ORIGINATOR,
+ node=TEST_NODE,
+ node_id=node_id,
+ ecdsa_key_name=ecdsa_key_name,
+ rotation_id=rotation_id,
+ schain_nodes_with_schains=get_schain_node_with_schains('test_schain'),
+ node_groups=node_groups,
+ generation=generation,
+ is_owner_contract=False,
+ skale_manager_opts=skale_manager_opts,
+ common_bls_public_keys=COMMON_BLS_PUBLIC_KEY,
+ schain_base_port=10000,
+ sync_node=True
+ )
+ config = schain_config.to_dict()
+
+ assert config['skaleConfig']['nodeInfo']['syncNode']
+ assert config['skaleConfig']['sChain']['dbStorageLimit'] == 284999761
+
+
+def test_generate_sync_node_config_archive_catchup(
+ schain_secret_key_file_default_chain,
+ skale_manager_opts
+):
+ node_id, generation, rotation_id = 1, 1, 0
+ ecdsa_key_name = 'test'
+ node_groups = {}
+
+ schain_config = generate_schain_config(
+ schain=SCHAIN_WITHOUT_ORIGINATOR,
+ node=TEST_NODE,
+ node_id=node_id,
+ ecdsa_key_name=ecdsa_key_name,
+ rotation_id=rotation_id,
+ schain_nodes_with_schains=get_schain_node_with_schains('test_schain'),
+ node_groups=node_groups,
+ generation=generation,
+ is_owner_contract=False,
+ skale_manager_opts=skale_manager_opts,
+ common_bls_public_keys=COMMON_BLS_PUBLIC_KEY,
+ schain_base_port=10000,
+ sync_node=True
+ )
+ config = schain_config.to_dict()
+
+ assert not config['skaleConfig']['nodeInfo'].get('syncFromCatchup')
+ assert not config['skaleConfig']['nodeInfo'].get('archiveMode')
+ assert config['skaleConfig']['sChain'].get('maxConsensusStorageBytes') < \
+ 1000000000000000000
+
+ schain_config = generate_schain_config(
+ schain=SCHAIN_WITHOUT_ORIGINATOR,
+ node=TEST_NODE,
+ node_id=node_id,
+ ecdsa_key_name=ecdsa_key_name,
+ rotation_id=rotation_id,
+ schain_nodes_with_schains=get_schain_node_with_schains('test_schain'),
+ node_groups=node_groups,
+ generation=generation,
+ is_owner_contract=False,
+ skale_manager_opts=skale_manager_opts,
+ common_bls_public_keys=COMMON_BLS_PUBLIC_KEY,
+ schain_base_port=10000,
+ sync_node=True,
+ archive=False,
+ catchup=True
+ )
+ config = schain_config.to_dict()
+
+ assert config['skaleConfig']['nodeInfo'].get('syncFromCatchup')
+ assert config['skaleConfig']['nodeInfo'].get('archiveMode') is False
+ assert config['skaleConfig']['sChain'].get('maxConsensusStorageBytes') < \
+ 1000000000000000000
+
+ schain_config = generate_schain_config(
+ schain=SCHAIN_WITHOUT_ORIGINATOR,
+ node=TEST_NODE,
+ node_id=node_id,
+ ecdsa_key_name=ecdsa_key_name,
+ rotation_id=rotation_id,
+ schain_nodes_with_schains=get_schain_node_with_schains('test_schain'),
+ node_groups=node_groups,
+ generation=generation,
+ is_owner_contract=False,
+ skale_manager_opts=skale_manager_opts,
+ common_bls_public_keys=COMMON_BLS_PUBLIC_KEY,
+ schain_base_port=10000,
+ sync_node=False,
+ archive=False,
+ catchup=True
+ )
+ config = schain_config.to_dict()
+
+ assert config['skaleConfig']['nodeInfo'].get('syncFromCatchup') is None
+ assert config['skaleConfig']['nodeInfo'].get('archiveMode') is None
+ assert config['skaleConfig']['sChain'].get('maxConsensusStorageBytes') < \
+ 1000000000000000000
+
+ schain_config = generate_schain_config(
+ schain=SCHAIN_WITHOUT_ORIGINATOR,
+ node=TEST_NODE,
+ node_id=node_id,
+ ecdsa_key_name=ecdsa_key_name,
+ rotation_id=rotation_id,
+ schain_nodes_with_schains=get_schain_node_with_schains('test_schain'),
+ node_groups=node_groups,
+ generation=generation,
+ is_owner_contract=False,
+ skale_manager_opts=skale_manager_opts,
+ common_bls_public_keys=COMMON_BLS_PUBLIC_KEY,
+ schain_base_port=10000,
+ sync_node=True,
+ archive=True,
+ catchup=True
+ )
+ config = schain_config.to_dict()
+
+ assert config['skaleConfig']['nodeInfo'].get('syncFromCatchup')
+ assert config['skaleConfig']['nodeInfo'].get('archiveMode')
+ assert config['skaleConfig']['sChain'].get('maxConsensusStorageBytes') == \
+ 1000000000000000000
+
+
+def test_generate_sync_node_config_static_accounts(
+ schain_secret_key_file_default_chain,
+ skale_manager_opts
+):
+ node_id, generation, rotation_id = 1, 1, 0
+ ecdsa_key_name = 'test'
+ node_groups = {}
+
+ schain_config = generate_schain_config(
+ schain=SCHAIN_WITH_STATIC_ACCOUNTS,
+ node=TEST_NODE,
+ node_id=node_id,
+ ecdsa_key_name=ecdsa_key_name,
+ rotation_id=rotation_id,
+ schain_nodes_with_schains=get_schain_node_with_schains('static_chain'),
+ node_groups=node_groups,
+ generation=generation,
+ is_owner_contract=False,
+ skale_manager_opts=skale_manager_opts,
+ common_bls_public_keys=COMMON_BLS_PUBLIC_KEY,
+ schain_base_port=10000,
+ sync_node=True
+ )
+ config = schain_config.to_dict()
+ assert config['accounts'].get('0x1111111')
+ assert config['accounts']['0x1111111']['balance'] == '1000000000000000000000000000000'
+
+ schain_config = generate_schain_config(
+ schain=SCHAIN_WITH_ORIGINATOR,
+ node=TEST_NODE,
+ node_id=node_id,
+ ecdsa_key_name=ecdsa_key_name,
+ rotation_id=rotation_id,
+ schain_nodes_with_schains=get_schain_node_with_schains('test_schain'),
+ node_groups=node_groups,
+ generation=generation,
+ is_owner_contract=False,
+ skale_manager_opts=skale_manager_opts,
+ common_bls_public_keys=COMMON_BLS_PUBLIC_KEY,
+ schain_base_port=10000,
+ sync_node=True
+ )
+ config = schain_config.to_dict()
+ assert not config['accounts'].get('0x1111111')
diff --git a/tests/schains/config/node_info_test.py b/tests/schains/config/node_info_test.py
index 8a5686e9d..9b3278d41 100644
--- a/tests/schains/config/node_info_test.py
+++ b/tests/schains/config/node_info_test.py
@@ -5,12 +5,14 @@
from core.schains.types import SchainType
from tools.configs import SGX_SSL_KEY_FILEPATH, SGX_SSL_CERT_FILEPATH
+COMMON_PUBLIC_KEY = [1, 2, 3, 4]
+
SECRET_KEY_MOCK = {
'key_share_name': 'BLS_KEY:SCHAIN_ID:1:NODE_ID:0:DKG_ID:0',
't': 1,
'n': 2,
- 'common_public_key': [1, 1, 1],
- 'public_key': ['1', '1', '1'],
+ 'common_public_key': COMMON_PUBLIC_KEY,
+ 'public_key': ['4', '3', '2', '1'],
}
SCHAIN_NAME = 'test_schain'
@@ -18,17 +20,48 @@
def test_generate_wallets_config():
with mock.patch('core.schains.config.node_info.read_json', return_value=SECRET_KEY_MOCK):
- wallets = generate_wallets_config('test_schain', 0)
+ wallets = generate_wallets_config(
+ 'test_schain',
+ 0,
+ sync_node=False,
+ nodes_in_schain=4,
+ common_bls_public_keys=COMMON_PUBLIC_KEY
+ )
assert wallets['ima']['keyShareName'] == SECRET_KEY_MOCK['key_share_name']
assert wallets['ima']['certFile'] == SGX_SSL_CERT_FILEPATH
assert wallets['ima']['keyFile'] == SGX_SSL_KEY_FILEPATH
assert wallets['ima']['commonBLSPublicKey0'] == '1'
- assert wallets['ima']['commonBLSPublicKey1'] == '1'
- assert wallets['ima']['commonBLSPublicKey2'] == '1'
- assert wallets['ima']['BLSPublicKey0'] == '1'
- assert wallets['ima']['BLSPublicKey1'] == '1'
- assert wallets['ima']['BLSPublicKey2'] == '1'
+ assert wallets['ima']['commonBLSPublicKey1'] == '2'
+ assert wallets['ima']['commonBLSPublicKey2'] == '3'
+ assert wallets['ima']['commonBLSPublicKey3'] == '4'
+ assert wallets['ima']['BLSPublicKey0'] == '4'
+ assert wallets['ima']['BLSPublicKey1'] == '3'
+ assert wallets['ima']['BLSPublicKey2'] == '2'
+ assert wallets['ima']['BLSPublicKey3'] == '1'
+
+
+def test_generate_wallets_config_sync_node():
+ with mock.patch('core.schains.config.node_info.read_json', return_value=SECRET_KEY_MOCK):
+ wallets = generate_wallets_config(
+ 'test_schain',
+ 0,
+ sync_node=True,
+ nodes_in_schain=4,
+ common_bls_public_keys=COMMON_PUBLIC_KEY
+ )
+
+ assert 'keyShareName' not in wallets['ima']
+ assert 'certFile' not in wallets['ima']
+ assert 'keyFile' not in wallets['ima']
+ assert 'BLSPublicKey0' not in wallets['ima']
+ assert 'BLSPublicKey1' not in wallets['ima']
+ assert 'BLSPublicKey2' not in wallets['ima']
+ assert 'BLSPublicKey3' not in wallets['ima']
+ assert wallets['ima']['commonBLSPublicKey0'] == '1'
+ assert wallets['ima']['commonBLSPublicKey1'] == '2'
+ assert wallets['ima']['commonBLSPublicKey2'] == '3'
+ assert wallets['ima']['commonBLSPublicKey3'] == '4'
def test_generate_current_node_info(
@@ -45,9 +78,11 @@ def test_generate_current_node_info(
ecdsa_key_name='123',
static_node_info=static_node_info,
schain={'name': _schain_name, 'partOfNode': 0},
- schains_on_node=[{'name': _schain_name, 'port': 10000}],
rotation_id=0,
- skale_manager_opts=skale_manager_opts
+ skale_manager_opts=skale_manager_opts,
+ nodes_in_schain=4,
+ schain_base_port=10000,
+ common_bls_public_keys=COMMON_PUBLIC_KEY
)
current_node_info_dict = current_node_info.to_dict()
assert current_node_info_dict['nodeID'] == 1
@@ -69,12 +104,18 @@ def test_generate_current_node_info(
ecdsa_key_name='123',
static_node_info=static_node_info,
schain={'name': _schain_name, 'partOfNode': 0},
- schains_on_node=[{'name': _schain_name, 'port': 10000}],
rotation_id=0,
- skale_manager_opts=skale_manager_opts
+ skale_manager_opts=skale_manager_opts,
+ nodes_in_schain=4,
+ schain_base_port=10000,
+ common_bls_public_keys=COMMON_PUBLIC_KEY
)
current_node_info_dict = current_node_info.to_dict()
assert current_node_info_dict['maxCacheSize'] == 16000000
+ assert current_node_info_dict['skale-manager'] == {
+ 'SchainsInternal': '0x1656',
+ 'Nodes': '0x7742'
+ }
def test_skale_manager_opts(
@@ -91,9 +132,11 @@ def test_skale_manager_opts(
ecdsa_key_name='123',
static_node_info=static_node_info,
schain={'name': _schain_name, 'partOfNode': 0},
- schains_on_node=[{'name': _schain_name, 'port': 10000}],
rotation_id=0,
- skale_manager_opts=skale_manager_opts
+ skale_manager_opts=skale_manager_opts,
+ nodes_in_schain=4,
+ schain_base_port=10000,
+ common_bls_public_keys=COMMON_PUBLIC_KEY
)
current_node_info_dict = current_node_info.to_dict()
assert current_node_info_dict['skale-manager'] == {
diff --git a/tests/schains/config/static_accounts_test.py b/tests/schains/config/static_accounts_test.py
new file mode 100644
index 000000000..bb4659281
--- /dev/null
+++ b/tests/schains/config/static_accounts_test.py
@@ -0,0 +1,14 @@
+from core.schains.config.static_accounts import is_static_accounts, static_accounts
+
+SCHAIN_NAME = 'test'
+
+
+def test_is_static_accounts():
+ assert is_static_accounts(SCHAIN_NAME)
+ assert not is_static_accounts('qwerty')
+
+
+def test_static_accounts():
+ accounts = static_accounts(SCHAIN_NAME)
+ assert isinstance(accounts, dict)
+ assert accounts.get('accounts', None)
diff --git a/tests/schains/config/static_params_test.py b/tests/schains/config/static_params_test.py
index 2759b128e..0fb7e4709 100644
--- a/tests/schains/config/static_params_test.py
+++ b/tests/schains/config/static_params_test.py
@@ -1,10 +1,18 @@
+from core.schains.config.helper import get_static_params
from core.schains.types import SchainType
from core.schains.config.static_params import (
get_automatic_repair_option,
+ get_schain_static_param,
get_static_schain_cmd,
get_static_schain_info,
get_static_node_info
)
+from tools.configs import ENV_TYPE
+
+
+TEST_SCHAIN_NAME = 'test-schain'
+LEGACY_TS_NAME = 'revertableFSPatchTimestamp'
+NEW_TS_NAME = 'contractStorageZeroValuePatchTimestamp'
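+# NEW_TS_NAME carries per-chain overrides in static params; LEGACY_TS_NAME is a single value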
def test_get_static_schain_cmd():
@@ -13,8 +21,9 @@ def test_get_static_schain_cmd():
def test_get_static_schain_info():
- schain_info = get_static_schain_info()
+ schain_info = get_static_schain_info(TEST_SCHAIN_NAME)
assert schain_info == {
+ "contractStorageZeroValuePatchTimestamp": 1500000,
"revertableFSPatchTimestamp": 1000000,
"contractStoragePatchTimestamp": 1000000,
"snapshotIntervalSec": 0,
@@ -24,6 +33,41 @@ def test_get_static_schain_info():
}
+def test_get_static_schain_info_custom_chain_ts():
+ custom_schain_info = get_static_schain_info(TEST_SCHAIN_NAME)
+ default_schain_info = get_static_schain_info('test')
+
+ assert custom_schain_info[LEGACY_TS_NAME] == default_schain_info[LEGACY_TS_NAME]
+ assert custom_schain_info[NEW_TS_NAME] != default_schain_info[NEW_TS_NAME]
+
+ assert custom_schain_info[NEW_TS_NAME] == 1500000
+ assert default_schain_info[NEW_TS_NAME] == 800000
+
+
+def test_get_schain_static_param():
+ static_params = get_static_params(ENV_TYPE)
+ legacy_ts_info = get_schain_static_param(
+ static_params['schain'][LEGACY_TS_NAME],
+ TEST_SCHAIN_NAME
+ )
+ assert legacy_ts_info == static_params['schain'].get(LEGACY_TS_NAME)
+
+ new_ts_info_custom_chain = get_schain_static_param(
+ static_params['schain'][NEW_TS_NAME],
+ TEST_SCHAIN_NAME
+ )
+ assert new_ts_info_custom_chain != static_params['schain'][NEW_TS_NAME]
+ assert new_ts_info_custom_chain == static_params['schain'][NEW_TS_NAME][TEST_SCHAIN_NAME]
+
+ new_ts_info_default_chain = get_schain_static_param(
+ static_params['schain'][NEW_TS_NAME],
+ 'test'
+ )
+ assert new_ts_info_default_chain != static_params['schain'][NEW_TS_NAME]
+ assert new_ts_info_default_chain != static_params['schain'][NEW_TS_NAME].get('test')
+ assert new_ts_info_default_chain == static_params['schain'][NEW_TS_NAME].get('default')
+
+
def test_get_static_node_info():
node_info_small = get_static_node_info(SchainType.small)
node_info_medium = get_static_node_info(SchainType.medium)
diff --git a/tests/schains/ima_test.py b/tests/schains/ima_test.py
index ff2fb0c19..18b3f584f 100644
--- a/tests/schains/ima_test.py
+++ b/tests/schains/ima_test.py
@@ -4,10 +4,12 @@
def test_get_ima_env(_schain_name, schain_config):
ima_env = get_ima_env(
schain_name=_schain_name,
- mainnet_chain_id=123
+ mainnet_chain_id=123,
+ time_frame=100
)
ima_env_dict = ima_env.to_dict()
assert len(ima_env_dict) == 23
assert ima_env_dict['CID_MAIN_NET'] == 123
assert ima_env_dict['RPC_PORT'] == 10010
+ assert ima_env_dict['TIME_FRAMING'] == 100
isinstance(ima_env_dict['CID_SCHAIN'], str)
diff --git a/tests/schains/monitor/action/config_action_test.py b/tests/schains/monitor/action/config_action_test.py
index 771769727..57c904ade 100644
--- a/tests/schains/monitor/action/config_action_test.py
+++ b/tests/schains/monitor/action/config_action_test.py
@@ -37,6 +37,7 @@ def config_checks(
schain_record=schain_record,
rotation_id=rotation_data['rotation_id'],
stream_version=CONFIG_STREAM,
+ last_dkg_successful=True,
current_nodes=current_nodes,
estate=estate
)
diff --git a/tests/schains/monitor/action/skaled_action_test.py b/tests/schains/monitor/action/skaled_action_test.py
index 1e0e8966b..3281ec6bd 100644
--- a/tests/schains/monitor/action/skaled_action_test.py
+++ b/tests/schains/monitor/action/skaled_action_test.py
@@ -17,11 +17,19 @@
from tools.configs.containers import SCHAIN_CONTAINER, IMA_CONTAINER
from web.models.schain import SChainRecord
+from tests.utils import IMA_MIGRATION_TS
+
CURRENT_TIMESTAMP = 1594903080
CURRENT_DATETIME = datetime.datetime.utcfromtimestamp(CURRENT_TIMESTAMP)
-def run_ima_container_mock(schain: dict, mainnet_chain_id: int, image: str, dutils=None):
+def run_ima_container_mock(
+ schain: dict,
+ mainnet_chain_id: int,
+ image: str,
+ time_frame: int,
+ dutils=None
+):
image_name, container_name, _, _ = get_container_info(
IMA_CONTAINER, schain['name'])
image = image or image_name
@@ -40,7 +48,9 @@ def monitor_schain_container_mock(
download_snapshot=False,
start_ts=None,
abort_on_exit=True,
- dutils=None
+ dutils=None,
+ sync_node=False,
+ historic_state=False
):
image_name, container_name, _, _ = get_container_info(
SCHAIN_CONTAINER, schain['name'])
@@ -66,7 +76,8 @@ def skaled_checks(
schain_name=name,
schain_record=schain_record,
rule_controller=rule_controller,
- dutils=dutils
+ dutils=dutils,
+ sync_node=False
)
@@ -80,6 +91,7 @@ def skaled_am(
predeployed_ima,
secret_key,
ssl_folder,
+ ima_migration_schedule,
dutils,
skaled_checks
):
@@ -135,7 +147,9 @@ def test_skaled_container_with_snapshot_action(skaled_am):
download_snapshot=True,
start_ts=None,
abort_on_exit=True,
- dutils=skaled_am.dutils
+ dutils=skaled_am.dutils,
+ sync_node=False,
+ historic_state=False
)
assert monitor_schain_mock.call_count == 1
finally:
@@ -159,7 +173,9 @@ def test_skaled_container_snapshot_delay_start_action(skaled_am):
download_snapshot=True,
start_ts=ts,
abort_on_exit=True,
- dutils=skaled_am.dutils
+ dutils=skaled_am.dutils,
+ sync_node=False,
+ historic_state=False
)
assert monitor_schain_mock.call_count == 1
finally:
@@ -250,31 +266,27 @@ def test_recreated_schain_containers(
assert ima_ts > ima_created_ts
-def test_ima_container_action_new_chain(
+def test_ima_container_action_from_scratch(
skaled_am,
skaled_checks,
schain_config,
predeployed_ima,
ima_linked,
cleanup_ima,
+ ima_migration_schedule,
dutils
):
- with mock.patch(
- 'core.schains.monitor.containers.run_ima_container',
- run_ima_container_mock
- ):
- skaled_am.ima_container()
- containers = dutils.get_all_ima_containers(all=True)
- assert len(containers) == 1
- container_name = containers[0].name
- assert container_name == f'skale_ima_{skaled_am.name}'
- image = dutils.get_container_image_name(container_name)
- assert image == 'skalenetwork/ima:2.0.0-beta.9'
+ skaled_am.ima_container()
+ containers = dutils.get_all_ima_containers(all=True)
+ assert len(containers) == 1
+ container_name = containers[0].name
+ assert container_name == f'skale_ima_{skaled_am.name}'
+ image = dutils.get_container_image_name(container_name)
+ assert image == 'skalenetwork/ima:2.0.0-beta.9'
-@pytest.mark.skip('Docker API GA issues need to be resolved')
-@mock.patch('core.schains.monitor.containers.run_ima_container', run_ima_container_mock)
-def test_ima_container_action_old_chain(
+# @pytest.mark.skip('Docker API GA issues need to be resolved')
+def test_ima_container_action_image_pulling(
skaled_am,
skaled_checks,
schain_config,
@@ -283,9 +295,8 @@ def test_ima_container_action_old_chain(
cleanup_ima,
dutils
):
- ts = int(time.time())
- mts = ts + 3600
- with mock.patch('core.schains.monitor.action.get_ima_migration_ts', return_value=mts):
+ dt = datetime.datetime.utcfromtimestamp(IMA_MIGRATION_TS - 5)
+ with freezegun.freeze_time(dt):
skaled_am.ima_container()
containers = dutils.get_all_ima_containers(all=True)
assert len(containers) == 1
@@ -296,8 +307,18 @@ def test_ima_container_action_old_chain(
assert image == 'skalenetwork/ima:2.0.0-develop.3'
assert dutils.pulled('skalenetwork/ima:2.0.0-beta.9')
- mts = ts - 5
- with mock.patch('core.schains.monitor.action.get_ima_migration_ts', return_value=mts):
+
+def test_ima_container_action_image_migration(
+ skaled_am,
+ skaled_checks,
+ schain_config,
+ predeployed_ima,
+ ima_linked,
+ cleanup_ima,
+ dutils
+):
+ dt = datetime.datetime.utcfromtimestamp(IMA_MIGRATION_TS + 5)
+ with freezegun.freeze_time(dt):
skaled_am.ima_container()
containers = dutils.get_all_ima_containers(all=True)
assert len(containers) == 1
@@ -307,12 +328,51 @@ def test_ima_container_action_old_chain(
assert image == 'skalenetwork/ima:2.0.0-beta.9'
+def test_ima_container_action_time_frame_migration(
+ skaled_am,
+ skaled_checks,
+ schain_config,
+ predeployed_ima,
+ ima_linked,
+ cleanup_ima,
+ dutils
+):
+ dt = datetime.datetime.utcfromtimestamp(IMA_MIGRATION_TS - 5)
+ with freezegun.freeze_time(dt):
+ with mock.patch('core.schains.monitor.containers.get_image_name',
+ return_value='skalenetwork/ima:2.0.0-beta.9'):
+ skaled_am.ima_container()
+ containers = dutils.get_all_ima_containers(all=True)
+ assert len(containers) == 1
+ container_name = containers[0].name
+ assert container_name == f'skale_ima_{skaled_am.name}'
+ image = dutils.get_container_image_name(container_name)
+ assert image == 'skalenetwork/ima:2.0.0-beta.9'
+ actual_time_frame = int(dutils.get_container_env_value(container_name, 'TIME_FRAMING'))
+ assert actual_time_frame == 1800
+
+ dt = datetime.datetime.utcfromtimestamp(IMA_MIGRATION_TS + 5)
+ with freezegun.freeze_time(dt):
+ with mock.patch('core.schains.monitor.containers.get_image_name',
+ return_value='skalenetwork/ima:2.0.0-beta.9'):
+ skaled_am.ima_container()
+ containers = dutils.get_all_ima_containers(all=True)
+ assert len(containers) == 1
+ container_name = containers[0].name
+ assert container_name == f'skale_ima_{skaled_am.name}'
+ image = dutils.get_container_image_name(container_name)
+ assert image == 'skalenetwork/ima:2.0.0-beta.9'
+ actual_time_frame = int(dutils.get_container_env_value(container_name, 'TIME_FRAMING'))
+ assert actual_time_frame == 900
+
+
def test_ima_container_action_not_linked(
skaled_am,
skaled_checks,
schain_db,
_schain_name,
cleanup_ima_containers,
+ ima_migration_schedule,
dutils
):
skaled_am.ima_container()
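
The migration tests above replace the get_ima_migration_ts mock with freezegun, freezing wall-clock time on either side of IMA_MIGRATION_TS. A minimal self-contained illustration of the pattern (requires the freezegun package):

import datetime
import time

import freezegun

MIGRATION_TS = 1688388551

with freezegun.freeze_time(datetime.datetime.utcfromtimestamp(MIGRATION_TS - 5)):
    assert int(time.time()) == MIGRATION_TS - 5  # pre-migration branch

with freezegun.freeze_time(datetime.datetime.utcfromtimestamp(MIGRATION_TS + 5)):
    assert int(time.time()) == MIGRATION_TS + 5  # post-migration branch
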
diff --git a/tests/schains/monitor/config_monitor_test.py b/tests/schains/monitor/config_monitor_test.py
index d7c211f65..71fbc2285 100644
--- a/tests/schains/monitor/config_monitor_test.py
+++ b/tests/schains/monitor/config_monitor_test.py
@@ -10,7 +10,7 @@
from core.schains.config.directory import schain_config_dir
from core.schains.monitor.action import ConfigActionManager
-from core.schains.monitor.config_monitor import RegularConfigMonitor
+from core.schains.monitor.config_monitor import RegularConfigMonitor, SyncConfigMonitor
from core.schains.external_config import ExternalConfig
from web.models.schain import SChainRecord
@@ -42,6 +42,7 @@ def config_checks(
rotation_id=rotation_data['rotation_id'],
stream_version=CONFIG_STREAM,
current_nodes=current_nodes,
+ last_dkg_successful=True,
estate=estate
)
@@ -84,9 +85,18 @@ def regular_config_monitor(config_am, config_checks):
)
+@pytest.fixture
+def sync_config_monitor(config_am, config_checks):
+ return SyncConfigMonitor(
+ action_manager=config_am,
+ checks=config_checks
+ )
+
+
def test_regular_config_monitor(schain_db, regular_config_monitor, rotation_data):
name = schain_db
rotation_id = rotation_data['rotation_id']
+
regular_config_monitor.run()
config_dir = schain_config_dir(name)
@@ -130,3 +140,76 @@ def test_regular_config_monitor_change_ip(
regular_config_monitor.am.cfm.sync_skaled_config_with_upstream()
regular_config_monitor.run()
assert econfig.reload_ts is None
+
+
+def test_sync_config_monitor(
+ skale,
+ schain_db,
+ config_am,
+ config_checks,
+ econfig,
+ estate,
+ rotation_data
+):
+ name = schain_db
+ config_dir = schain_config_dir(name)
+
+ rotation_id = rotation_data['rotation_id']
+ config_pattern = os.path.join(
+ config_dir,
+ f'schain_{name}_{rotation_id}_*.json'
+ )
+ assert len(glob.glob(config_pattern)) == 0
+
+ assert econfig.synced(estate)
+
+ estate.chain_id = 1
+ config_checks.estate = estate
+ config_am.estate = estate
+ assert not econfig.synced(estate)
+
+ sync_config_monitor = SyncConfigMonitor(
+ action_manager=config_am,
+ checks=config_checks
+ )
+ sync_config_monitor.run()
+ assert econfig.synced(estate)
+ config_filename = glob.glob(config_pattern)
+ assert os.path.isfile(config_filename[0])
+
+
+def test_sync_config_monitor_dkg_not_completed(
+ skale,
+ schain_db,
+ config_am,
+ config_checks,
+ econfig,
+ estate,
+ rotation_data
+):
+ name = schain_db
+ config_dir = schain_config_dir(name)
+
+ rotation_id = rotation_data['rotation_id']
+ config_pattern = os.path.join(
+ config_dir,
+ f'schain_{name}_{rotation_id}_*.json'
+ )
+ assert len(glob.glob(config_pattern)) == 0
+
+ assert econfig.synced(estate)
+
+ estate.chain_id = 1
+ config_checks.estate = estate
+ config_am.estate = estate
+ config_checks._last_dkg_successful = False
+ assert not econfig.synced(estate)
+
+ sync_config_monitor = SyncConfigMonitor(
+ action_manager=config_am,
+ checks=config_checks
+ )
+ sync_config_monitor.run()
+ assert econfig.synced(estate)
+ # config generation was not triggered because dkg has not been completed
+ assert len(glob.glob(config_pattern)) == 0
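
The two sync monitor tests above pin down a simple gate: config is regenerated only when the external state is out of sync and the last DKG succeeded. Reduced to a predicate (names are illustrative, not the repo's API):

def should_regenerate_config(synced: bool, last_dkg_successful: bool) -> bool:
    # An out-of-sync state alone is not enough; an unfinished DKG blocks generation.
    return not synced and last_dkg_successful

assert should_regenerate_config(synced=False, last_dkg_successful=True)
assert not should_regenerate_config(synced=False, last_dkg_successful=False)
assert not should_regenerate_config(synced=True, last_dkg_successful=True)
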
diff --git a/tests/schains/monitor/rpc_test.py b/tests/schains/monitor/rpc_test.py
index 702707b20..5445ef887 100644
--- a/tests/schains/monitor/rpc_test.py
+++ b/tests/schains/monitor/rpc_test.py
@@ -1,11 +1,20 @@
+import datetime
+import json
+import mock
from time import sleep
+import freezegun
+import requests
+
from core.schains.monitor.rpc import handle_failed_schain_rpc
from core.schains.runner import get_container_info
+from core.schains.rpc import check_endpoint_blocks
from tools.configs.containers import SCHAIN_CONTAINER
-
from web.models.schain import SChainRecord
+CURRENT_TIMESTAMP = 1594903080
+CURRENT_DATETIME = datetime.datetime.utcfromtimestamp(CURRENT_TIMESTAMP)
+
def test_handle_failed_schain_rpc_no_container(schain_db, dutils, skaled_status):
schain_record = SChainRecord.get_by_name(schain_db)
@@ -15,21 +24,17 @@ def test_handle_failed_schain_rpc_no_container(schain_db, dutils, skaled_status)
schain={'name': schain_db},
schain_record=schain_record,
skaled_status=skaled_status,
- dutils=dutils
+ dutils=dutils,
)
assert not dutils.is_container_exists(container_name)
def test_handle_failed_schain_rpc_exit_time_reached(
- schain_db,
- dutils,
- cleanup_schain_containers,
- skaled_status_exit_time_reached
+ schain_db, dutils, cleanup_schain_containers, skaled_status_exit_time_reached
):
schain_record = SChainRecord.get_by_name(schain_db)
- image_name, container_name, _, _ = get_container_info(
- SCHAIN_CONTAINER, schain_db)
+ image_name, container_name, _, _ = get_container_info(SCHAIN_CONTAINER, schain_db)
dutils.run_container(image_name=image_name, name=container_name, entrypoint='bash -c "exit 0"')
sleep(7)
@@ -42,7 +47,7 @@ def test_handle_failed_schain_rpc_exit_time_reached(
schain={'name': schain_db},
schain_record=schain_record,
skaled_status=skaled_status_exit_time_reached,
- dutils=dutils
+ dutils=dutils,
)
assert dutils.is_container_exists(container_name)
@@ -51,20 +56,14 @@ def test_handle_failed_schain_rpc_exit_time_reached(
def test_monitor_schain_downloading_snapshot(
- schain_db,
- dutils,
- cleanup_schain_containers,
- skaled_status_downloading_snapshot
+ schain_db, dutils, cleanup_schain_containers, skaled_status_downloading_snapshot
):
schain_record = SChainRecord.get_by_name(schain_db)
- image_name, container_name, _, _ = get_container_info(
- SCHAIN_CONTAINER, schain_db)
+ image_name, container_name, _, _ = get_container_info(SCHAIN_CONTAINER, schain_db)
dutils.run_container(
- image_name=image_name,
- name=container_name,
- entrypoint='bash -c "sleep 100"'
+ image_name=image_name, name=container_name, entrypoint='bash -c "sleep 100"'
)
sleep(7)
schain_record.set_failed_rpc_count(100)
@@ -76,25 +75,19 @@ def test_monitor_schain_downloading_snapshot(
schain={'name': schain_db},
schain_record=schain_record,
skaled_status=skaled_status_downloading_snapshot,
- dutils=dutils
+ dutils=dutils,
)
container_info = dutils.get_info(container_name)
assert container_info['stats']['State']['FinishedAt'] == finished_at
def test_handle_failed_schain_rpc_stuck_max_retries(
- schain_db,
- dutils,
- skaled_status,
- cleanup_schain_containers
+ schain_db, dutils, skaled_status, cleanup_schain_containers
):
schain_record = SChainRecord.get_by_name(schain_db)
- image_name, container_name, _, _ = get_container_info(
- SCHAIN_CONTAINER, schain_db)
+ image_name, container_name, _, _ = get_container_info(SCHAIN_CONTAINER, schain_db)
dutils.run_container(
- image_name=image_name,
- name=container_name,
- entrypoint='bash -c "sleep 100"'
+ image_name=image_name, name=container_name, entrypoint='bash -c "sleep 100"'
)
schain_record.set_failed_rpc_count(100)
@@ -107,7 +100,7 @@ def test_handle_failed_schain_rpc_stuck_max_retries(
schain={'name': schain_db},
schain_record=schain_record,
skaled_status=skaled_status,
- dutils=dutils
+ dutils=dutils,
)
container_info = dutils.get_info(container_name)
assert container_info['stats']['State']['FinishedAt'] == finished_at
@@ -115,12 +108,9 @@ def test_handle_failed_schain_rpc_stuck_max_retries(
def test_monitor_container_exited(schain_db, dutils, cleanup_schain_containers, skaled_status):
schain_record = SChainRecord.get_by_name(schain_db)
- image_name, container_name, _, _ = get_container_info(
- SCHAIN_CONTAINER, schain_db)
+ image_name, container_name, _, _ = get_container_info(SCHAIN_CONTAINER, schain_db)
dutils.run_container(
- image_name=image_name,
- name=container_name,
- entrypoint='bash -c "exit 100;"'
+ image_name=image_name, name=container_name, entrypoint='bash -c "exit 100;"'
)
schain_record.set_failed_rpc_count(100)
@@ -134,7 +124,7 @@ def test_monitor_container_exited(schain_db, dutils, cleanup_schain_containers,
schain={'name': schain_db},
schain_record=schain_record,
skaled_status=skaled_status,
- dutils=dutils
+ dutils=dutils,
)
assert schain_record.restart_count == 0
container_info = dutils.get_info(container_name)
@@ -142,18 +132,12 @@ def test_monitor_container_exited(schain_db, dutils, cleanup_schain_containers,
def test_handle_failed_schain_rpc_stuck(
- schain_db,
- dutils,
- cleanup_schain_containers,
- skaled_status
+ schain_db, dutils, cleanup_schain_containers, skaled_status
):
schain_record = SChainRecord.get_by_name(schain_db)
- image_name, container_name, _, _ = get_container_info(
- SCHAIN_CONTAINER, schain_db)
+ image_name, container_name, _, _ = get_container_info(SCHAIN_CONTAINER, schain_db)
dutils.run_container(
- image_name=image_name,
- name=container_name,
- entrypoint='bash -c "sleep 100"'
+ image_name=image_name, name=container_name, entrypoint='bash -c "sleep 100"'
)
schain_record.set_failed_rpc_count(100)
@@ -167,8 +151,39 @@ def test_handle_failed_schain_rpc_stuck(
schain={'name': schain_db},
schain_record=schain_record,
skaled_status=skaled_status,
- dutils=dutils
+ dutils=dutils,
)
assert schain_record.restart_count == 1
container_info = dutils.get_info(container_name)
assert container_info['stats']['State']['FinishedAt'] != finished_at
+
+
+@mock.patch('tools.helper.requests.post')
+@freezegun.freeze_time(CURRENT_DATETIME)
+def test_check_endpoint_blocks(post_request_mock):
+ endpoint = 'http://127.0.0.1:10003'
+
+ post_request_mock.side_effect = requests.exceptions.RequestException('Test error')
+ assert check_endpoint_blocks(endpoint) is False
+ post_request_mock.side_effect = None
+
+ response_dummy = mock.Mock()
+ post_request_mock.return_value = response_dummy
+
+ response_dummy.json = mock.Mock(return_value={})
+ assert check_endpoint_blocks(endpoint) is False
+
+ response_dummy.json = mock.Mock(
+ side_effect=json.JSONDecodeError('Test error', doc='doc', pos=1)
+ )
+ assert check_endpoint_blocks(endpoint) is False
+
+ response_dummy.json = mock.Mock(return_value={'result': {'timestamp': '0xhhhhh'}})
+ assert check_endpoint_blocks(endpoint) is False
+
+ response_dummy.json = mock.Mock(return_value={'result': {'timestamp': '0x1'}})
+ assert check_endpoint_blocks(endpoint) is False
+
+ hex_offset_ts = hex(CURRENT_TIMESTAMP + 1)
+ response_dummy.json = mock.Mock(return_value={'result': {'timestamp': hex_offset_ts}})
+ assert check_endpoint_blocks(endpoint) is True
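
For reference, an approximate reconstruction of the behaviour the mocks above exercise; the real check_endpoint_blocks lives in core.schains.rpc, and ALLOWED_TS_DIFF is an assumed tolerance, not a value confirmed by this diff:

import json
import time

import requests

ALLOWED_TS_DIFF = 120  # assumption

def check_endpoint_blocks_sketch(endpoint: str) -> bool:
    try:
        res = requests.post(
            endpoint,
            json={'jsonrpc': '2.0', 'method': 'eth_getBlockByNumber',
                  'params': ['latest', False], 'id': 1},
            timeout=30
        )
        block = res.json().get('result') or {}
        block_ts = int(block['timestamp'], 16)
    except (requests.exceptions.RequestException, json.JSONDecodeError,
            KeyError, ValueError):
        # Covers transport errors, bad JSON, missing fields and malformed hex.
        return False
    return abs(time.time() - block_ts) < ALLOWED_TS_DIFF
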
diff --git a/tests/schains/monitor/skaled_monitor_test.py b/tests/schains/monitor/skaled_monitor_test.py
index bded111d5..c63cda779 100644
--- a/tests/schains/monitor/skaled_monitor_test.py
+++ b/tests/schains/monitor/skaled_monitor_test.py
@@ -49,7 +49,9 @@ def monitor_schain_container_mock(
skaled_status,
download_snapshot=False,
start_ts=None,
- dutils=None
+ dutils=None,
+ sync_node=False,
+ historic_state=False
):
image_name, container_name, _, _ = get_container_info(
SCHAIN_CONTAINER, schain['name'])
@@ -79,7 +81,8 @@ def skaled_checks(
schain_name=name,
schain_record=schain_record,
rule_controller=rule_controller,
- dutils=dutils
+ dutils=dutils,
+ sync_node=False
)
@@ -94,6 +97,7 @@ def skaled_am(
rotation_data,
secret_key,
ssl_folder,
+ ima_migration_schedule,
dutils,
skaled_checks
):
@@ -418,6 +422,7 @@ def test_get_skaled_monitor_new_node(
ssl_folder,
skaled_status,
skaled_checks,
+ ima_migration_schedule,
dutils
):
name = schain_db
@@ -486,15 +491,27 @@ def test_get_skaled_monitor_recreate(
):
name = schain_db
schain_record = SChainRecord.get_by_name(name)
+ schain_record.set_ssl_change_date(datetime.datetime.now())
+ status = skaled_checks.get_all()
- schain_record.set_needs_reload(True)
- mon = get_skaled_monitor(
- skaled_am,
- skaled_checks.get_all(),
- schain_record,
- skaled_status
- )
- assert mon == RecreateSkaledMonitor
+ with mock.patch('core.schains.ssl.get_ssl_files_change_date',
+ return_value=datetime.datetime.now()):
+ status['skaled_container'] = False
+ mon = get_skaled_monitor(
+ skaled_am,
+ status,
+ schain_record,
+ skaled_status
+ )
+ assert mon == RegularSkaledMonitor
+ status['skaled_container'] = True
+ mon = get_skaled_monitor(
+ skaled_am,
+ status,
+ schain_record,
+ skaled_status
+ )
+ assert mon == RecreateSkaledMonitor
def test_regular_skaled_monitor(
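
The reworked recreate test encodes the new selection rule: an SSL certificate rotation leads to RecreateSkaledMonitor only while the skaled_container check passes; otherwise the regular monitor handles the chain. As a hedged predicate (names are illustrative):

import datetime

def needs_recreate(cert_change_date: datetime.datetime,
                   record_ssl_change_date: datetime.datetime,
                   skaled_container_ok: bool) -> bool:
    cert_rotated = cert_change_date > record_ssl_change_date
    return cert_rotated and skaled_container_ok

ts = datetime.datetime.now()
later = ts + datetime.timedelta(seconds=1)
assert not needs_recreate(later, ts, skaled_container_ok=False)
assert needs_recreate(later, ts, skaled_container_ok=True)
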
diff --git a/tests/schains/ssl_test.py b/tests/schains/ssl_test.py
index 50f4e2244..3fbf91f88 100644
--- a/tests/schains/ssl_test.py
+++ b/tests/schains/ssl_test.py
@@ -1,7 +1,8 @@
import os
+from datetime import datetime
from tools.configs import NODE_DATA_PATH
-from core.schains.ssl import get_ssl_filepath
+from core.schains.ssl import get_ssl_filepath, get_ssl_files_change_date
def test_get_ssl_filepath(cert_key_pair):
@@ -11,3 +12,11 @@ def test_get_ssl_filepath(cert_key_pair):
assert ssl_key_path == os.path.join(certs_filepath, 'ssl_key')
assert ssl_cert_path == os.path.join(certs_filepath, 'ssl_cert')
+
+
+def test_get_ssl_files_change_date(cert_key_pair):
+ time_now = datetime.now()
+ change_date = get_ssl_files_change_date()
+
+ assert time_now > change_date
+ assert time_now.timestamp() - 1000 < change_date.timestamp()
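
A minimal sketch of what get_ssl_files_change_date plausibly does, built on the SSL_KEY_PATH/SSL_CERT_PATH constants added to tools/configs below; the mtime-based approach is an assumption, since the test only requires a recent past datetime:

import os
from datetime import datetime

from tools.configs import SSL_KEY_PATH, SSL_CERT_PATH

def ssl_files_change_date_sketch() -> datetime:
    # Take the newer mtime of the key/cert pair as the rotation date.
    mtime = max(os.path.getmtime(SSL_KEY_PATH), os.path.getmtime(SSL_CERT_PATH))
    return datetime.fromtimestamp(mtime)
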
diff --git a/tests/skale-data/config/ima_migration_schedule.yaml b/tests/skale-data/config/ima_migration_schedule.yaml
deleted file mode 100644
index 9e1a770e7..000000000
--- a/tests/skale-data/config/ima_migration_schedule.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
-devnet:
- test_chain: 1688388551
diff --git a/tests/skale-data/config/schain_accounts/devnet/schain-static_chain.json b/tests/skale-data/config/schain_accounts/devnet/schain-static_chain.json
new file mode 100644
index 000000000..e5a14fe30
--- /dev/null
+++ b/tests/skale-data/config/schain_accounts/devnet/schain-static_chain.json
@@ -0,0 +1,7 @@
+{
+ "accounts": {
+ "0x1111111": {
+ "balance": "1000000000000000000000000000000"
+ }
+ }
+}
\ No newline at end of file
diff --git a/tests/skale-data/config/schain_accounts/devnet/schain-test.json b/tests/skale-data/config/schain_accounts/devnet/schain-test.json
new file mode 100644
index 000000000..6f50398dd
--- /dev/null
+++ b/tests/skale-data/config/schain_accounts/devnet/schain-test.json
@@ -0,0 +1,7 @@
+{
+ "accounts": {
+ "0x9d8C194E07c628C15C82f06a53cCCf3c7b68810D": {
+ "balance": "1000000000000000000000000000000"
+ }
+ }
+}
\ No newline at end of file
diff --git a/tests/skale-data/config/schain_allocation.yml b/tests/skale-data/config/schain_allocation.yml
index 90d5c2e9b..b56a0e15e 100644
--- a/tests/skale-data/config/schain_allocation.yml
+++ b/tests/skale-data/config/schain_allocation.yml
@@ -24,12 +24,6 @@ devnet:
test4:
evm_storage_part: 427499642
logs_storage_part: 284999761
- rotate_after_block:
- large: 1310721
- medium: 40960
- small: 10240
- test: 40960
- test4: 40960
volume:
large:
max_consensus_storage_bytes: 22799980953
@@ -79,12 +73,6 @@ mainnet:
test4:
evm_storage_part: 10687499919
logs_storage_part: 7124999946
- rotate_after_block:
- large: 32768030
- medium: 1024000
- small: 256000
- test: 1024000
- test4: 1024000
volume:
large:
max_consensus_storage_bytes: 569999995699
@@ -134,12 +122,6 @@ qanet:
test4:
evm_storage_part: 1068749659
logs_storage_part: 712499773
- rotate_after_block:
- large: 3276803
- medium: 102400
- small: 25600
- test: 102400
- test4: 102400
volume:
large:
max_consensus_storage_bytes: 56999981875
@@ -189,12 +171,6 @@ testnet:
test4:
evm_storage_part: 1068749659
logs_storage_part: 712499773
- rotate_after_block:
- large: 3276803
- medium: 102400
- small: 25600
- test: 102400
- test4: 102400
volume:
large:
max_consensus_storage_bytes: 56999981875
diff --git a/tests/skale-data/config/static_params.yaml b/tests/skale-data/config/static_params.yaml
index 25566b522..03fafefff 100644
--- a/tests/skale-data/config/static_params.yaml
+++ b/tests/skale-data/config/static_params.yaml
@@ -37,6 +37,9 @@ envs:
docker-compose: 1.27.4
schain:
+ contractStorageZeroValuePatchTimestamp:
+ default: 800000
+ test-schain: 1500000
revertableFSPatchTimestamp: 1000000
contractStoragePatchTimestamp: 1000000
snapshotIntervalSec: 0
@@ -52,6 +55,8 @@ envs:
bindIP: "0.0.0.0"
logLevel: "info"
logLevelConfig: "info"
+ pg-threads: 10
+ pg-threads-limit: 10
small:
minCacheSize: 1000000
maxCacheSize: 2000000
@@ -109,6 +114,9 @@ envs:
docker-compose: 1.27.4
schain:
+ contractStorageZeroValuePatchTimestamp:
+ default: 800000
+ test-schain: 1500000
revertableFSPatchTimestamp: 1000000
contractStoragePatchTimestamp: 1000000
snapshotIntervalSec: 0
@@ -124,6 +132,8 @@ envs:
bindIP: "0.0.0.0"
logLevel: "info"
logLevelConfig: "info"
+ pg-threads: 10
+ pg-threads-limit: 10
small:
minCacheSize: 1000000
maxCacheSize: 2000000
@@ -181,6 +191,9 @@ envs:
docker-compose: 1.27.4
schain:
+ contractStorageZeroValuePatchTimestamp:
+ default: 800000
+ test-schain: 1500000
revertableFSPatchTimestamp: 1000000
contractStoragePatchTimestamp: 1000000
snapshotIntervalSec: 0
@@ -198,6 +211,8 @@ envs:
bindIP: "0.0.0.0"
logLevel: "info"
logLevelConfig: "info"
+ pg-threads: 10
+ pg-threads-limit: 10
small:
minCacheSize: 1000000
maxCacheSize: 2000000
@@ -256,6 +271,9 @@ envs:
docker-compose: 1.27.4
schain:
+ contractStorageZeroValuePatchTimestamp:
+ default: 800000
+ test-schain: 1500000
revertableFSPatchTimestamp: 1000000
contractStoragePatchTimestamp: 1000000
snapshotIntervalSec: 0
@@ -263,6 +281,11 @@ envs:
snapshotDownloadTimeout: 18000
snapshotDownloadInactiveTimeout: 120
+ ima:
+ time_frame:
+ before: 1800
+ after: 900
+
schain_cmd:
["-v 3", "--web3-trace", "--enable-debug-behavior-apis", "--aa no"]
@@ -273,6 +296,8 @@ envs:
bindIP: "0.0.0.0"
logLevel: "info"
logLevelConfig: "info"
+ pg-threads: 10
+ pg-threads-limit: 10
small:
minCacheSize: 1000000
maxCacheSize: 2000000
diff --git a/tests/skale-data/config/static_schain_params.json b/tests/skale-data/config/static_schain_params.json
new file mode 100644
index 000000000..422edc9d0
--- /dev/null
+++ b/tests/skale-data/config/static_schain_params.json
@@ -0,0 +1,78 @@
+{
+ "context_contract": {
+ "address": "0xD2001000000000000000000000000000000000D2",
+ "bytecode": "0x608060405234801561001057600080fd5b506004361061005e576000357c01000000000000000000000000000000000000000000000000000000009004806315cd7a2d1461006357806367091a01146100e657806383e781fe1461012a575b600080fd5b61006b610174565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156100ab578082015181840152602081019050610090565b50505050905090810190601f1680156100d85780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b610128600480360360208110156100fc57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050610216565b005b6101326102b2565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b606060018054600181600116156101000203166002900480601f01602080910402602001604051908101604052809291908181526020018280546001816001161561010002031660029004801561020c5780601f106101e15761010080835404028352916020019161020c565b820191906000526020600020905b8154815290600101906020018083116101ef57829003601f168201915b5050505050905090565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161461026f57600080fd5b806000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555050565b60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1690509056fea165627a7a723058203d58598dee9113d96d02a8821205cf2af6e04d52d4d3b52f4563f77936acb5490029"
+ },
+ "deploy_controller": {
+ "address": "0xD2002000000000000000000000000000000000D2",
+ "bytecode": "0x608060405234801561001057600080fd5b50600436106100a95760003560e01c80638ab1d681116100715780638ab1d6811461017657806391d1485414610192578063a217fddf146101c2578063d547741f146101e0578063e43252d7146101fc578063ecd0026114610218576100a9565b806301ffc9a7146100ae57806313f44d10146100de578063248a9ca31461010e5780632f2ff15d1461013e57806336568abe1461015a575b600080fd5b6100c860048036038101906100c39190610877565b610236565b6040516100d59190610927565b60405180910390f35b6100f860048036038101906100f391906107e9565b6102b0565b6040516101059190610927565b60405180910390f35b61012860048036038101906101239190610812565b610307565b6040516101359190610942565b60405180910390f35b6101586004803603810190610153919061083b565b610326565b005b610174600480360381019061016f919061083b565b61038c565b005b610190600480360381019061018b91906107e9565b61040f565b005b6101ac60048036038101906101a7919061083b565b61043c565b6040516101b99190610927565b60405180910390f35b6101ca6104a6565b6040516101d79190610942565b60405180910390f35b6101fa60048036038101906101f5919061083b565b6104ad565b005b610216600480360381019061021191906107e9565b610513565b005b610220610540565b60405161022d9190610942565b60405180910390f35b60007f7965db0b000000000000000000000000000000000000000000000000000000007bffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916827bffffffffffffffffffffffffffffffffffffffffffffffffffffffff191614806102a957506102a882610564565b5b9050919050565b60006102bf6000801b8361043c565b806102f057506102ef7ffc425f2263d0df187444b70e47283d622c70181c5baebb1306a01edba1ce184c8361043c565b5b8061030057506102ff826105ce565b5b9050919050565b6000806000838152602001908152602001600020600101549050919050565b61033f61033283610307565b61033a6105e1565b61043c565b61037e576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016103759061095d565b60405180910390fd5b61038882826105e9565b5050565b6103946105e1565b73ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff1614610401576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016103f89061099d565b60405180910390fd5b61040b82826106c9565b5050565b6104397ffc425f2263d0df187444b70e47283d622c70181c5baebb1306a01edba1ce184c826104ad565b50565b600080600084815260200190815260200160002060000160008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060009054906101000a900460ff16905092915050565b6000801b81565b6104c66104b983610307565b6104c16105e1565b61043c565b610505576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016104fc9061097d565b60405180910390fd5b61050f82826106c9565b5050565b61053d7ffc425f2263d0df187444b70e47283d622c70181c5baebb1306a01edba1ce184c82610326565b50565b7ffc425f2263d0df187444b70e47283d622c70181c5baebb1306a01edba1ce184c81565b60007f01ffc9a7000000000000000000000000000000000000000000000000000000007bffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916827bffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916149050919050565b600080823b905060008111915050919050565b600033905090565b6105f3828261043c565b6106c557600160008084815260200190815260200160002060000160008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060006101000a81548160ff02191690831515021790555061066a6105e1565b73ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff16837f2f8788117e7eff1d82e926ec794901d17c78024a50270940304540a733656f0d60405160405180910390a45b5050565b6106d3828261043c565b156107a65760008060008481526020019081526020
0160002060000160008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060006101000a81548160ff02191690831515021790555061074b6105e1565b73ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff16837ff6391f5c32d9c69d2a47ea670b442974b53935d1edc7fd64eb21e047a839171b60405160405180910390a45b5050565b6000813590506107b981610b2f565b92915050565b6000813590506107ce81610b46565b92915050565b6000813590506107e381610b5d565b92915050565b6000602082840312156107fb57600080fd5b6000610809848285016107aa565b91505092915050565b60006020828403121561082457600080fd5b6000610832848285016107bf565b91505092915050565b6000806040838503121561084e57600080fd5b600061085c858286016107bf565b925050602061086d858286016107aa565b9150509250929050565b60006020828403121561088957600080fd5b6000610897848285016107d4565b91505092915050565b6108a9816109e0565b82525050565b6108b8816109ec565b82525050565b60006108cb602f836109bd565b91506108d682610a42565b604082019050919050565b60006108ee6030836109bd565b91506108f982610a91565b604082019050919050565b6000610911602f836109bd565b915061091c82610ae0565b604082019050919050565b600060208201905061093c60008301846108a0565b92915050565b600060208201905061095760008301846108af565b92915050565b60006020820190508181036000830152610976816108be565b9050919050565b60006020820190508181036000830152610996816108e1565b9050919050565b600060208201905081810360008301526109b681610904565b9050919050565b600082825260208201905092915050565b60006109d982610a22565b9050919050565b60008115159050919050565b6000819050919050565b60007fffffffff0000000000000000000000000000000000000000000000000000000082169050919050565b600073ffffffffffffffffffffffffffffffffffffffff82169050919050565b7f416363657373436f6e74726f6c3a2073656e646572206d75737420626520616e60008201527f2061646d696e20746f206772616e740000000000000000000000000000000000602082015250565b7f416363657373436f6e74726f6c3a2073656e646572206d75737420626520616e60008201527f2061646d696e20746f207265766f6b6500000000000000000000000000000000602082015250565b7f416363657373436f6e74726f6c3a2063616e206f6e6c792072656e6f756e636560008201527f20726f6c657320666f722073656c660000000000000000000000000000000000602082015250565b610b38816109ce565b8114610b4357600080fd5b50565b610b4f816109ec565b8114610b5a57600080fd5b50565b610b66816109f6565b8114610b7157600080fd5b5056fea26469706673582212200cf6e93757fc9f992b5f84689875e3f806ebc8fd04f7e7295a053d5ec4ef52ee64736f6c63430008030033"
+ },
+ "current_node_info": {
+ "bind_ip": "0.0.0.0",
+ "log_level": "info",
+ "log_level_config": "info",
+ "pg_threads": 10,
+ "pg_threads_limit": 10
+ },
+ "cache_options": {
+ "small": {
+ "min_cache_size": 1000000,
+ "max_cache_size": 2000000,
+ "collection_queue_size": 2,
+ "collection_duration": 10,
+ "transaction_queue_size": 1000,
+ "max_open_leveldb_files": 25
+ },
+ "medium": {
+ "min_cache_size": 32000000,
+ "max_cache_size": 64000000,
+ "collection_queue_size": 20,
+ "collection_duration": 60,
+ "transaction_queue_size": 100000,
+ "max_open_leveldb_files": 256
+ },
+ "sync_node": {
+ "min_cache_size": 32000000,
+ "max_cache_size": 64000000,
+ "collection_queue_size": 20,
+ "collection_duration": 60,
+ "transaction_queue_size": 100000,
+ "max_open_leveldb_files": 256
+ },
+ "large": {
+ "min_cache_size": 32000000,
+ "max_cache_size": 64000000,
+ "collection_queue_size": 20,
+ "collection_duration": 60,
+ "transaction_queue_size": 100000,
+ "max_open_leveldb_files": 256
+ },
+ "test": {
+ "min_cache_size": 32000000,
+ "max_cache_size": 64000000,
+ "collection_queue_size": 20,
+ "collection_duration": 60,
+ "transaction_queue_size": 100000,
+ "max_open_leveldb_files": 256
+ },
+ "test4": {
+ "min_cache_size": 32000000,
+ "max_cache_size": 64000000,
+ "collection_queue_size": 20,
+ "collection_duration": 60,
+ "transaction_queue_size": 100000,
+ "max_open_leveldb_files": 256
+ }
+ },
+ "schain": {
+ "snapshot_interval_sec": 0,
+ "empty_block_interval_ms": 10,
+ "free_contract_deployment": true
+ },
+ "schain_cmd": [
+ "-v 2",
+ "--web3-trace",
+ "--enable-debug-behavior-apis",
+ "--aa no"
+ ]
+}
diff --git a/tests/skale-data/node_data/node_options.json b/tests/skale-data/node_data/node_options.json
new file mode 100644
index 000000000..0fb23b3ab
--- /dev/null
+++ b/tests/skale-data/node_data/node_options.json
@@ -0,0 +1,5 @@
+{
+ "catchup": false,
+ "archive": true,
+ "historic_state": false
+}
\ No newline at end of file
diff --git a/tests/skale-data/node_data/resource_allocation.json b/tests/skale-data/node_data/resource_allocation.json
index fc94a4653..b0733174c 100644
--- a/tests/skale-data/node_data/resource_allocation.json
+++ b/tests/skale-data/node_data/resource_allocation.json
@@ -5,16 +5,19 @@
"test": 22,
"small": 5,
"medium": 22,
- "large": 716
+ "large": 716,
+ "sync_node": 716
},
"mem": {
"test4": 300647710,
"test": 300647710,
"small": 75161927,
"medium": 300647710,
- "large": 9620726743
+ "large": 9620726743,
+ "sync_node": 9620726743
},
"disk": {
+ "sync_node": 75999936512,
"large": 75999936512,
"medium": 2374998016,
"small": 593749504,
@@ -22,6 +25,12 @@
"test4": 2374998016
},
"volume_limits": {
+ "sync_node": {
+ "max_consensus_storage_bytes": 22799980953,
+ "max_file_storage_bytes": 22799980953,
+ "max_reserved_storage_bytes": 7599993651,
+ "max_skaled_leveldb_storage_bytes": 22799980953
+ },
"large": {
"max_consensus_storage_bytes": 22799980953,
"max_file_storage_bytes": 22799980953,
@@ -54,6 +63,10 @@
}
},
"leveldb_limits": {
+ "sync_node": {
+ "contract_storage": 13679988571,
+ "db_storage": 9119992381
+ },
"large": {
"contract_storage": 13679988571,
"db_storage": 9119992381
@@ -82,14 +95,16 @@
"test": 9,
"small": 2,
"medium": 9,
- "large": 307
+ "large": 307,
+ "sync_node": 307
},
"mem": {
"test4": 128849018,
"test": 128849018,
"small": 32212254,
"medium": 128849018,
- "large": 4123168604
+ "large": 4123168604,
+ "sync_node": 4123168604
}
}
}
\ No newline at end of file
diff --git a/tests/test_generate_config_sync.py b/tests/test_generate_config_sync.py
new file mode 100644
index 000000000..00b95ba3f
--- /dev/null
+++ b/tests/test_generate_config_sync.py
@@ -0,0 +1,65 @@
+import json
+import pytest
+from skale.schain_config.rotation_history import get_previous_schain_groups
+
+from core.schains.config.predeployed import generate_predeployed_accounts
+from core.schains.config.precompiled import generate_precompiled_accounts
+
+from core.schains.limits import get_schain_type
+from core.schains.config.generator import (
+ get_on_chain_owner, get_schain_originator, SChainBaseConfig)
+
+from tools.helper import is_address_contract
+from tools.configs.schains import BASE_SCHAIN_CONFIG_FILEPATH
+
+
+CHAINS = []
+
+
+@pytest.mark.skip(reason="test only used to generate static accounts for a sync node")
+def test_generate_config(skale):
+ for schain_name in CHAINS:
+
+ schain = skale.schains.get_by_name(schain_name)
+ schain_type = get_schain_type(schain['partOfNode'])
+
+ node_groups = get_previous_schain_groups(skale, schain_name)
+ original_group = node_groups[0]['nodes']
+
+ schain_nodes_with_schains = []
+ for key, value in original_group.items():
+ schain_nodes_with_schains.append({
+ 'id': int(key),
+ 'publicKey': value[2]
+ })
+
+ is_owner_contract = is_address_contract(skale.web3, schain['mainnetOwner'])
+ on_chain_owner = get_on_chain_owner(schain, schain['generation'], is_owner_contract)
+
+ mainnet_owner = schain['mainnetOwner']
+
+ originator_address = get_schain_originator(schain)
+
+ precompiled_accounts = generate_precompiled_accounts(
+ on_chain_owner=on_chain_owner
+ )
+
+ base_config = SChainBaseConfig(BASE_SCHAIN_CONFIG_FILEPATH)
+
+ predeployed_accounts = generate_predeployed_accounts(
+ schain_name=schain['name'],
+ schain_type=schain_type,
+ schain_nodes=schain_nodes_with_schains,
+ on_chain_owner=on_chain_owner,
+ mainnet_owner=mainnet_owner,
+ originator_address=originator_address,
+ generation=schain['generation']
+ )
+
+ accounts = {
+ **base_config.config['accounts'],
+ **predeployed_accounts,
+ **precompiled_accounts,
+ }
+ with open(f'accounts/schain-{schain_name}.json', 'w') as outfile:
+ json.dump({'accounts': accounts}, outfile, indent=4)
diff --git a/tests/utils.py b/tests/utils.py
index fab6628ff..29f37e745 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -54,6 +54,8 @@
IpRange('3.3.3.3', '4.4.4.4')
]
+IMA_MIGRATION_TS = 1688388551
+
class FailedAPICall(Exception):
pass
@@ -138,7 +140,7 @@ def run_simple_schain_container_in_sync_mode(schain_data: dict,
def run_simple_ima_container(schain: dict, dutils: DockerUtils):
- image = get_image_name(type=IMA_CONTAINER)
+ image = get_image_name(image_type=IMA_CONTAINER)
run_ima_container(schain, mainnet_chain_id=1, image=image, dutils=dutils)
diff --git a/tools/configs/__init__.py b/tools/configs/__init__.py
index 8ec237b62..0fa95de31 100644
--- a/tools/configs/__init__.py
+++ b/tools/configs/__init__.py
@@ -20,6 +20,7 @@
SCHAIN_CONFIG_DIR_SKALED = '/schain_config'
CONFIG_FOLDER_NAME = 'config'
CONTRACTS_INFO_FOLDER_NAME = 'contracts_info'
+SKALE_LIB_PATH = os.getenv('SKALE_LIB_PATH', '/var/lib/skale')
MANAGER_CONTRACTS_INFO_NAME = 'manager.json'
IMA_CONTRACTS_INFO_NAME = 'ima.json'
@@ -28,6 +29,8 @@
CONTRACTS_INFO_FOLDER = os.path.join(SKALE_VOLUME_PATH, CONTRACTS_INFO_FOLDER_NAME)
CONFIG_FOLDER = os.path.join(SKALE_VOLUME_PATH, CONFIG_FOLDER_NAME)
+STATIC_ACCOUNTS_FOLDER = os.path.join(CONFIG_FOLDER, 'schain_accounts')
+
FLASK_SECRET_KEY_FILENAME = 'flask_db_key.txt'
FLASK_SECRET_KEY_FILE = os.path.join(NODE_DATA_PATH, FLASK_SECRET_KEY_FILENAME)
@@ -37,6 +40,12 @@
SSL_CERTIFICATES_FILENAME = 'ssl'
SSL_CERTIFICATES_FILEPATH = os.path.join(NODE_DATA_PATH, SSL_CERTIFICATES_FILENAME)
+SSL_KEY_NAME = 'ssl_key'
+SSL_CRT_NAME = 'ssl_cert'
+
+SSL_KEY_PATH = os.path.join(SSL_CERTIFICATES_FILEPATH, SSL_KEY_NAME)
+SSL_CERT_PATH = os.path.join(SSL_CERTIFICATES_FILEPATH, SSL_CRT_NAME)
+
BACKUP_RUN = os.getenv('BACKUP_RUN', False)
SGX_SERVER_URL = os.environ.get('SGX_SERVER_URL')
@@ -90,3 +99,7 @@
PULL_CONFIG_FOR_SCHAIN = os.getenv('PULL_CONFIG_FOR_SCHAIN')
CHANGE_IP_DELAY = 300
+
+STATSD_HOST = '127.0.0.1'
+STATSD_PORT = 8125
+SYNC_NODE = os.getenv('SYNC_NODE') == 'True'
diff --git a/tools/configs/containers.py b/tools/configs/containers.py
index f4052a3ed..1ae8249db 100644
--- a/tools/configs/containers.py
+++ b/tools/configs/containers.py
@@ -55,3 +55,5 @@
MAX_SCHAIN_RESTART_COUNT = int(os.getenv('MAX_SCHAIN_RESTART_COUNT', 5))
CONTAINER_LOGS_SEPARATOR = b'=' * 80 + b'\n'
+
+HISTORIC_STATE_IMAGE_POSTFIX = '-historic'
diff --git a/tools/configs/filebeat.py b/tools/configs/filebeat.py
deleted file mode 100644
index 006cb03a9..000000000
--- a/tools/configs/filebeat.py
+++ /dev/null
@@ -1,9 +0,0 @@
-import os
-from tools.configs import CONFIG_FOLDER, NODE_DATA_PATH
-
-FILEBEAT_TEMPLATE_PATH = os.path.join(CONFIG_FOLDER, 'filebeat.yml.j2')
-FILEBEAT_CONFIG_PATH = os.path.join(NODE_DATA_PATH, 'filebeat.yml')
-
-FILEBEAT_CONTAINER_NAME = 'skale_filebeat'
-
-MONITORING_CONTAINERS = os.getenv('MONITORING_CONTAINERS') == 'True'
diff --git a/tools/configs/ima.py b/tools/configs/ima.py
index 2f9b5edaa..94ceefa42 100644
--- a/tools/configs/ima.py
+++ b/tools/configs/ima.py
@@ -24,12 +24,9 @@
SCHAIN_CONFIG_DIR_SKALED
)
-IMA_ENDPOINT = os.environ['IMA_ENDPOINT']
MAINNET_IMA_ABI_FILEPATH = os.getenv('MAINNET_IMA_ABI_FILEPATH') or \
os.path.join(CONTRACTS_INFO_FOLDER, IMA_CONTRACTS_INFO_NAME)
-DISABLE_IMA = os.getenv('DISABLE_IMA') == 'True'
-
IMA_NETWORK_BROWSER_FILENAME = 'ima_network_browser_data.json'
IMA_NETWORK_BROWSER_FILEPATH = os.path.join(SCHAIN_CONFIG_DIR_SKALED, IMA_NETWORK_BROWSER_FILENAME)
@@ -91,4 +88,4 @@
}
}
-IMA_TIME_FRAMING = 1800 # 30 min
+DEFAULT_TIME_FRAME = 1800 # 30 min
diff --git a/tools/configs/logs.py b/tools/configs/logs.py
index 6ccb58043..d21c8da41 100644
--- a/tools/configs/logs.py
+++ b/tools/configs/logs.py
@@ -32,6 +32,10 @@
DEBUG_LOG_FILENAME = 'debug.log'
DEBUG_LOG_PATH = os.path.join(LOG_FOLDER, DEBUG_LOG_FILENAME)
+SYNC_LOG_FILENAME = 'sync_node.log'
+SYNC_LOG_PATH = os.path.join(LOG_FOLDER, SYNC_LOG_FILENAME)
+
+
REMOVED_CONTAINERS_FOLDER_NAME = '.removed_containers'
REMOVED_CONTAINERS_FOLDER_PATH = os.path.join(
LOG_FOLDER,
diff --git a/tools/configs/monitoring.py b/tools/configs/monitoring.py
new file mode 100644
index 000000000..8bbfc5d8c
--- /dev/null
+++ b/tools/configs/monitoring.py
@@ -0,0 +1,21 @@
+import os
+from tools.configs import CONFIG_FOLDER, NODE_DATA_PATH
+
+FILEBEAT_TEMPLATE_PATH = os.path.join(CONFIG_FOLDER, 'filebeat.yml.j2')
+FILEBEAT_CONFIG_PATH = os.path.join(NODE_DATA_PATH, 'filebeat.yml')
+
+FILEBEAT_CONTAINER_NAME = 'skale_filebeat'
+
+MONITORING_CONTAINERS = os.getenv('MONITORING_CONTAINERS') == 'True'
+
+INFLUX_URL = os.getenv('INFLUX_URL')
+
+TELEGRAF = os.getenv('TELEGRAF') == 'True'
+
+TELEGRAF_TEMPLATE_PATH = os.path.join(CONFIG_FOLDER, 'telegraf.conf.j2')
+TELEGRAF_CONFIG_PATH = os.path.join(CONFIG_FOLDER, 'telegraf.conf')
+
+TELEGRAF_CONTAINER_NAME = 'skale_telegraf'
+TELEGRAF_SERVICE_NAME = 'telegraf'
+TELEGRAF_IMAGE = 'telegraf:1.27.4'
+TELEGRAF_MEM_LIMIT = os.getenv('TELEGRAF_MEM_LIMIT', '1GB')
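
The telegraf config is presumably rendered from the TELEGRAF_TEMPLATE_PATH jinja2 template before the skale_telegraf container starts. A hedged sketch of such rendering; the node_id and influx_url variables are assumptions about the template, not confirmed by this diff:

from jinja2 import Environment

def render_telegraf_config(template_text: str, node_id: int, influx_url: str) -> str:
    return Environment().from_string(template_text).render(
        node_id=node_id, influx_url=influx_url)

example = '[[outputs.influxdb]]\n  urls = ["{{ influx_url }}"]  # node {{ node_id }}'
print(render_telegraf_config(example, node_id=1, influx_url='http://127.0.0.1:8086'))
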
diff --git a/tools/configs/schains.py b/tools/configs/schains.py
index fa07270f7..566709ca8 100644
--- a/tools/configs/schains.py
+++ b/tools/configs/schains.py
@@ -18,7 +18,12 @@
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import os
-from tools.configs import NODE_DATA_PATH, CONFIG_FOLDER, NODE_DATA_PATH_HOST
+from tools.configs import (
+ CONFIG_FOLDER,
+ NODE_DATA_PATH,
+ NODE_DATA_PATH_HOST,
+ SKALE_LIB_PATH
+)
SCHAINS_DIR_NAME = 'schains'
SCHAINS_DIR_PATH = os.path.join(NODE_DATA_PATH, SCHAINS_DIR_NAME)
@@ -40,5 +45,11 @@
SKALED_STATUS_FILENAME = 'skaled.status'
+STATIC_SCHAIN_DIR_NAME = 'schains'
+SCHAIN_STATE_PATH = os.path.join(SKALE_LIB_PATH, 'schains')
+SCHAIN_STATIC_PATH = os.path.join(SKALE_LIB_PATH, 'filestorage')
+
DEFAULT_RPC_CHECK_TIMEOUT = 30
RPC_CHECK_TIMEOUT_STEP = 10
+
+MAX_CONSENSUS_STORAGE_INF_VALUE = 1000000000000000000
diff --git a/tools/docker_utils.py b/tools/docker_utils.py
index 3dbbceb3d..00378fdbf 100644
--- a/tools/docker_utils.py
+++ b/tools/docker_utils.py
@@ -47,6 +47,7 @@
)
from tools.configs.logs import REMOVED_CONTAINERS_FOLDER_PATH
+
logger = logging.getLogger(__name__)
MAX_RETRIES = 12
@@ -69,7 +70,11 @@ def inner(*args, **kwargs) -> list:
res.append({
'image': container.attrs['Config']['Image'],
'name': re.sub('/', '', container.attrs['Name']),
- 'state': container.attrs['State']
+ 'state': container.attrs['State'],
+ 'cpu_shares': container.attrs['HostConfig']['CpuShares'],
+ 'mem_limit': container.attrs['HostConfig']['Memory'],
+ 'swap_limit': container.attrs['HostConfig']['MemorySwap'],
+ 'swappiness': container.attrs['HostConfig']['MemorySwappiness']
})
return res
@@ -148,7 +153,7 @@ def get_containers_info(self, all=False, name_filter='*', format=False) -> list:
def get_all_ima_containers(self, all=False, format=False) -> list:
return self.client.containers.list(all=all, filters={'name': 'skale_ima_*'})
- def get_info(self, container_id: str) -> dict:
+ def get_info(self, container_id: str, raise_not_found: bool = False) -> dict:
container_info = {}
try:
container = self.client.containers.get(container_id)
@@ -157,6 +162,8 @@ def get_info(self, container_id: str) -> dict:
container_info['stats'] = self.cli.inspect_container(container.id)
container_info['status'] = container.status
except docker.errors.NotFound:
+ if raise_not_found:
+ raise
logger.debug(
f'Can not get info - no such container: {container_id}')
container_info['status'] = CONTAINER_NOT_FOUND
@@ -189,7 +196,7 @@ def get_vol(self, name: str) -> Volume:
try:
return self.client.volumes.get(name)
except docker.errors.NotFound:
- logger.debug(f'Volume {name} is not exist')
+ logger.debug(f'Volume {name} does not exist')
return None
def rm_vol(self, name: str, retry_lvmpy_error: bool = True) -> None:
@@ -261,7 +268,8 @@ def display_container_logs(
self,
container_name: Container,
head: int = 100,
- tail: int = 200
+ tail: int = 200,
+ to_logger: bool = True
) -> str:
container = self.safe_get_container(container_name)
if not container:
@@ -273,7 +281,10 @@ def display_container_logs(
)
pretext = f'container {container_name} logs: \n'
logs = (head_lines + CONTAINER_LOGS_SEPARATOR + tail_lines).decode("utf-8")
- logger.info(pretext + logs)
+ if to_logger:
+ logger.info(pretext + logs)
+ else:
+ print(pretext + logs)
return logs
@classmethod
@@ -387,6 +398,15 @@ def get_container_image_name(self, name: str) -> Optional[str]:
return None
return info['stats']['Config']['Image']
+ def get_container_env_value(self, container_name: str, env_option: str) -> Optional[str]:
+ info = self.get_info(container_name, raise_not_found=True)
+ env = info['stats']['Config']['Env']
+ try:
+ value = next(filter(lambda v: v.startswith(env_option), env))
+ except StopIteration:
+ return None
+ return value.split('=')[1]
+
def wait_for_container_creation(self, name: str, timeout=CONTAINER_CREATION_TIMEOUT):
start_ts = time.time()
while time.time() - start_ts < timeout and not self.is_container_exists(name):
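
Usage sketch for the new helper; the container name is hypothetical, and the call assumes default DockerUtils construction works in the node environment:

from tools.docker_utils import DockerUtils

dutils = DockerUtils()
# Returns the value part of 'TIME_FRAMING=<value>', None if the variable is
# absent, and raises docker.errors.NotFound for an unknown container.
time_frame = dutils.get_container_env_value('skale_ima_test', 'TIME_FRAMING')
if time_frame is not None:
    print(f'TIME_FRAMING={int(time_frame)}')
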
diff --git a/tools/helper.py b/tools/helper.py
index c19538c59..71b788f4f 100644
--- a/tools/helper.py
+++ b/tools/helper.py
@@ -18,18 +18,16 @@
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import os
+import itertools
import json
-import time
-import psutil
-
-import yaml
import logging
-import itertools
+import psutil
import subprocess
+import time
from subprocess import PIPE
import requests
-
+import yaml
from filelock import FileLock
from jinja2 import Environment
from skale import Skale
@@ -38,6 +36,7 @@
from tools.configs import INIT_LOCK_PATH
from tools.configs.web3 import ENDPOINT, ABI_FILEPATH, STATE_FILEPATH, ZERO_ADDRESS
+
logger = logging.getLogger(__name__)
POST_REQUEST_TIMEOUT = 30
@@ -185,3 +184,7 @@ def is_zero_address(address: str) -> bool:
def is_address_contract(web3, address) -> bool:
"""Returns true if contract is deployed at the requested address"""
return web3.eth.get_code(address) != b''
+
+
+def no_hyphens(name: str) -> str:
+ return name.replace('-', '_')
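
Quick usage example for the new helper (the chain name is arbitrary):

from tools.helper import no_hyphens

assert no_hyphens('my-test-chain') == 'my_test_chain'
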
diff --git a/tools/logger.py b/tools/logger.py
index ae91f6698..5c82b2fd9 100644
--- a/tools/logger.py
+++ b/tools/logger.py
@@ -31,6 +31,7 @@
ADMIN_LOG_FORMAT,
ADMIN_LOG_PATH,
API_LOG_FORMAT, API_LOG_PATH,
+ SYNC_LOG_PATH,
DEBUG_LOG_PATH,
LOG_FILE_SIZE_BYTES,
LOG_BACKUP_COUNT
@@ -124,3 +125,7 @@ def init_admin_logger():
def init_api_logger():
init_logger(API_LOG_FORMAT, API_LOG_PATH)
+
+
+def init_sync_logger():
+ init_logger(ADMIN_LOG_FORMAT, SYNC_LOG_PATH)
diff --git a/tools/node_options.py b/tools/node_options.py
index 85ee7cca3..351218f12 100644
--- a/tools/node_options.py
+++ b/tools/node_options.py
@@ -29,3 +29,15 @@
class NodeOptions(JsonObject):
def __init__(self):
super().__init__(filepath=NODE_OPTIONS_FILEPATH)
+
+ @property
+ def archive(self) -> bool:
+ return self._get('archive')
+
+ @property
+ def catchup(self) -> bool:
+ return self._get('catchup')
+
+ @property
+ def historic_state(self) -> bool:
+ return self._get('historic_state')
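
Reading the new flags; the values match the node_options.json fixture added above, and a missing key is assumed to yield None via JsonObject._get:

from tools.node_options import NodeOptions

opts = NodeOptions()
if opts.historic_state:
    print('skaled will run with the historic state image')
print(f'archive={opts.archive} catchup={opts.catchup}')
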
diff --git a/tools/db.py b/tools/resources.py
similarity index 64%
rename from tools/db.py
rename to tools/resources.py
index b6ae9be90..7a63592bf 100644
--- a/tools/db.py
+++ b/tools/resources.py
@@ -1,15 +1,20 @@
import redis
+import statsd
from peewee import SqliteDatabase
from tools.configs.db import DB_FILE, DB_PRAGMAS, REDIS_URI
+from tools.configs import STATSD_HOST, STATSD_PORT
db = SqliteDatabase(DB_FILE, DB_PRAGMAS)
+cpool: redis.ConnectionPool = redis.ConnectionPool.from_url(REDIS_URI)
+rs: redis.Redis = redis.Redis(connection_pool=cpool)
+statsd_client = statsd.StatsClient(STATSD_HOST, STATSD_PORT)
def get_database():
return db
-cpool: redis.ConnectionPool = redis.ConnectionPool.from_url(REDIS_URI)
-rs: redis.Redis = redis.Redis(connection_pool=cpool)
+def get_statsd_client():
+ return statsd_client
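
Usage sketch for the shared statsd client; incr/gauge/timer are standard statsd.StatsClient methods, while the metric names here are made up for illustration:

from tools.resources import get_statsd_client

statsd_client = get_statsd_client()
statsd_client.incr('admin.schains.monitor_runs')  # counter
statsd_client.gauge('admin.schains.active', 3)    # gauge
with statsd_client.timer('admin.schains.config_generation'):
    ...  # timed section
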
diff --git a/tools/wallet_utils.py b/tools/wallet_utils.py
index 3faca9e6a..59923887c 100644
--- a/tools/wallet_utils.py
+++ b/tools/wallet_utils.py
@@ -31,7 +31,7 @@
SGX_SERVER_URL
)
from tools.configs.web3 import ENDPOINT
-from tools.db import rs as grs
+from tools.resources import rs as grs
logger = logging.getLogger(__name__)
diff --git a/web/migrations.py b/web/migrations.py
index b6d9971fe..3341a49bf 100644
--- a/web/migrations.py
+++ b/web/migrations.py
@@ -18,12 +18,13 @@
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import logging
+from datetime import datetime
from playhouse.migrate import SqliteMigrator, migrate as playhouse_migrate
from peewee import DateTimeField, IntegerField, BooleanField, CharField
from web.models.schain import DEFAULT_CONFIG_VERSION
-from tools.db import get_database
+from tools.resources import get_database
logger = logging.getLogger(__name__)
@@ -55,6 +56,8 @@ def run_migrations(db, migrator):
add_restart_count_field(db, migrator)
add_failed_rpc_count_field(db, migrator)
+ # 2.1/2.2 -> 2.3/sync update fields
+ add_ssl_change_date_field(db, migrator)
# 2.3 -> 2.4 update fields
add_failed_snapshot_from(db, migrator)
@@ -119,6 +122,13 @@ def add_failed_rpc_count_field(db, migrator):
)
+def add_ssl_change_date_field(db, migrator):
+ add_column(
+ db, migrator, 'SChainRecord', 'ssl_change_date',
+ DateTimeField(default=datetime.now())
+ )
+
+
def add_failed_snapshot_from(db, migrator):
add_column(
db, migrator, 'SChainRecord', 'snapshot_from',
diff --git a/web/models/base.py b/web/models/base.py
index 2a94ebc63..9d9dd5d93 100644
--- a/web/models/base.py
+++ b/web/models/base.py
@@ -18,7 +18,7 @@
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from peewee import Model
-from tools.db import get_database
+from tools.resources import get_database
class BaseModel(Model):
diff --git a/web/models/schain.py b/web/models/schain.py
index 8fcef8ae3..a7f67eb79 100644
--- a/web/models/schain.py
+++ b/web/models/schain.py
@@ -52,6 +52,8 @@ class SChainRecord(BaseModel):
restart_count = IntegerField(default=0)
failed_rpc_count = IntegerField(default=0)
+ ssl_change_date = DateTimeField(default=datetime.now())
+
@classmethod
def add(cls, name):
try:
@@ -95,7 +97,14 @@ def to_dict(cls, record):
'needs_reload': record.needs_reload,
'monitor_last_seen': record.monitor_last_seen.timestamp(),
'monitor_id': record.monitor_id,
- 'config_version': record.config_version
+ 'config_version': record.config_version,
+ 'ssl_change_date': record.ssl_change_date.timestamp(),
+ 'repair_mode': record.repair_mode,
+ 'backup_run': record.backup_run,
+ 'sync_config_run': record.sync_config_run,
+ 'snapshot_from': record.snapshot_from,
+ 'restart_count': record.restart_count,
+ 'failed_rpc_count': record.failed_rpc_count
}
def upload(self, *args, **kwargs) -> None:
@@ -183,6 +192,11 @@ def reset_failed_counters(self) -> None:
self.set_restart_count(0)
self.set_failed_rpc_count(0)
+ def set_ssl_change_date(self, value: datetime) -> None:
+ logger.info(f'Changing ssl_change_date for {self.name} to {value}')
+ self.ssl_change_date = value
+ self.save()
+
def is_dkg_done(self) -> bool:
return self.dkg_status == DKGStatus.DONE
diff --git a/web/routes/health.py b/web/routes/health.py
index d2c8d1725..2503d674b 100644
--- a/web/routes/health.py
+++ b/web/routes/health.py
@@ -112,7 +112,9 @@ def schains_checks():
rotation_id=rotation_id,
stream_version=stream_version,
current_nodes=current_nodes,
- estate=estate
+ last_dkg_successful=True,
+ estate=estate,
+ sync_node=False
).get_all(needed=checks_filter)
checks.append({
'name': schain['name'],