diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 31625095..bc19da5f 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -30,10 +30,17 @@ jobs:
python -m pip install --upgrade pip
pip install -e .[dev]
+ - name: Generate info
+ run: ./scripts/generate_info.sh 1.0.0 my-branch normal
+
- name: Lint with flake8
run: |
flake8 .
+ - name: Check with ruff
+ run: |
+ ruff check
+
- name: Build binary - normal
run: |
mkdir -p ./dist
diff --git a/.gitignore b/.gitignore
index de02cf68..9ce8b81c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -119,4 +119,6 @@ resource_allocation.json
conf.json
test-env
-nginx.conf
\ No newline at end of file
+nginx.conf
+tests/.skale/node_data/docker.json
+tests/.skale/node_data/node_options.json
diff --git a/README.md b/README.md
index 39762a67..e9bbc11f 100644
--- a/README.md
+++ b/README.md
@@ -8,30 +8,29 @@ SKALE Node CLI, part of the SKALE suite of validator tools, is the command line
## Table of Contents
-1. [Installation](#installation)
-2. [CLI usage](#cli-usage)
- 2.1 [Top level commands](#top-level-commands)
- 2.2 [Node](#node-commands)
- 2.3 [Wallet](#wallet-commands)
- 2.4 [sChains](#schain-commands)
- 2.5 [Health](#health-commands)
- 2.6 [SSL](#ssl-commands)
- 2.7 [Logs](#logs-commands)
- 2.8 [Resources allocation](#resources-allocation-commands)
- 2.9 [Validate](#validate-commands)
-3. [Sync CLI usage](#sync-cli-usage)
- 3.1 [Top level commands](#top-level-commands-sync)
- 3.2 [Sync node commands](#sync-node-commands)
-4. [Exit codes](#exit-codes)
-5. [Development](#development)
+1. [Installation](#installation)
+2. [CLI usage](#cli-usage)
+ 2.1 [Top level commands](#top-level-commands)
+ 2.2 [Node](#node-commands)
+ 2.3 [Wallet](#wallet-commands)
+ 2.4 [sChains](#schain-commands)
+ 2.5 [Health](#health-commands)
+ 2.6 [SSL](#ssl-commands)
+ 2.7 [Logs](#logs-commands)
+ 2.8 [Resources allocation](#resources-allocation-commands)
+3. [Sync CLI usage](#sync-cli-usage)
+ 3.1 [Top level commands](#top-level-commands-sync)
+ 3.2 [Sync node commands](#sync-node-commands)
+4. [Exit codes](#exit-codes)
+5. [Development](#development)
## Installation
-- Prerequisites
+- Prerequisites
Ensure that the following packages are installed: **docker**, **docker-compose** (1.27.4+)
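+
+To sanity-check these prerequisites (illustrative commands):
+
+```shell
+docker --version
+docker-compose --version   # expect 1.27.4 or newer
+```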
-- Download the executable
+- Download the executable
```shell
VERSION_NUM={put the version number here} && sudo -E bash -c "curl -L https://github.com/skalenetwork/node-cli/releases/download/$VERSION_NUM/skale-$VERSION_NUM-`uname -s`-`uname -m` > /usr/local/bin/skale"
@@ -43,13 +42,13 @@ For Sync node version:
VERSION_NUM={put the version number here} && sudo -E bash -c "curl -L https://github.com/skalenetwork/node-cli/releases/download/$VERSION_NUM/skale-$VERSION_NUM-`uname -s`-`uname -m`-sync > /usr/local/bin/skale"
```
-- Apply executable permissions to the downloaded binary:
+- Apply executable permissions to the downloaded binary:
```shell
chmod +x /usr/local/bin/skale
```
-- Test the installation
+- Test the installation
```shell
skale --help
@@ -77,7 +76,7 @@ skale version
Options:
-- `--short` - prints version only, without additional text.
+- `--short` - prints version only, without additional text.
### Node commands
@@ -99,7 +98,7 @@ Options:
Initialize a SKALE node on the current machine
-> :warning: **Please avoid re-initialization**: First run `skale node info` to confirm current state of intialization.
+> :warning: **Avoid re-initializing a node that’s already initialized**: Run `skale node info` first to confirm the current initialization state.
```shell
skale node init [ENV_FILE]
@@ -111,22 +110,26 @@ Arguments:
You should specify the following environment variables:
-- `SGX_SERVER_URL` - SGX server URL
-- `DISK_MOUNTPOINT` - disk mount point for storing sChains data
-- `DOCKER_LVMPY_STREAM` - stream of `docker-lvmpy` to use
-- `CONTAINER_CONFIGS_STREAM` - stream of `skale-node` to use
-- `ENDPOINT` - RPC endpoint of the node in the network where SKALE Manager is deployed
-- `MANAGER_CONTRACTS_ABI_URL` - URL to SKALE Manager contracts ABI and addresses
-- `IMA_CONTRACTS_ABI_URL` - URL to IMA contracts ABI and addresses
-- `FILEBEAT_URL` - URL to the Filebeat log server
-- `ENV_TYPE` - environement type (mainnet, testnet, etc)
+- `SGX_SERVER_URL` - SGX server URL
+- `DISK_MOUNTPOINT` - disk mount point for storing sChains data
+- `DOCKER_LVMPY_STREAM` - stream of `docker-lvmpy` to use
+- `CONTAINER_CONFIGS_STREAM` - stream of `skale-node` to use
+- `ENDPOINT` - RPC endpoint of the node in the network where SKALE Manager is deployed
+- `MANAGER_CONTRACTS` - SKALE Manager main contract alias or address
+- `IMA_CONTRACTS` - IMA main contract alias or address
+- `FILEBEAT_URL` - URL to the Filebeat log server
+- `ENV_TYPE` - environment type (e.g., 'mainnet', 'testnet', 'qanet', 'devnet')
+> In the `MANAGER_CONTRACTS` and `IMA_CONTRACTS` fields, if you are using a recognized network (e.g., 'Mainnet', 'Holesky', 'local'), you can use a recognized alias (e.g., 'production', 'grants'). The list of recognized networks and aliases is in [contract deployments](https://github.com/skalenetwork/skale-contracts/tree/deployments).
+> :warning: If you are using a custom network or a contract that isn't recognized by the underlying SKALE library, you **MUST** provide a direct contract address.
Optional variables:
-- `TG_API_KEY` - Telegram API key
-- `TG_CHAT_ID` - Telegram chat ID
-- `MONITORING_CONTAINERS` - will enable monitoring containers (`filebeat`, `cadvisor`, `prometheus`)
+- `TG_API_KEY` - Telegram API key
+- `TG_CHAT_ID` - Telegram chat ID
+- `MONITORING_CONTAINERS` - enables monitoring containers (`cadvisor`, `node-exporter`).
+
+> Filebeat is always enabled and requires `FILEBEAT_URL`; it is **not optional**.
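+
+As a reference, here is a minimal `.env` sketch putting the variables above together (every value below is a placeholder, not a real endpoint, stream, or alias for your setup):
+
+```shell
+SGX_SERVER_URL=https://127.0.0.1:1026
+DISK_MOUNTPOINT=/dev/xvdd
+DOCKER_LVMPY_STREAM=master
+CONTAINER_CONFIGS_STREAM=master
+ENDPOINT=https://eth-rpc.example.com
+MANAGER_CONTRACTS=production
+IMA_CONTRACTS=production
+FILEBEAT_URL=filebeat.example.com:5000
+ENV_TYPE=mainnet
+MONITORING_CONTAINERS=True
+```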
#### Node initialization from backup
@@ -153,7 +156,6 @@ Arguments:
- `BACKUP_FOLDER_PATH` - path to the folder where the backup file will be saved
-
#### Node Registration
```shell
@@ -162,13 +164,13 @@ skale node register
Required arguments:
-- `--ip` - public IP for RPC connections and consensus
-- `--domain`/`-d` - SKALE node domain name
-- `--name` - SKALE node name
+- `--ip` - public IP for RPC connections and consensus
+- `--domain`/`-d` - SKALE node domain name
+- `--name` - SKALE node name
Optional arguments:
-- `--port` - public port - beginning of the port range for node SKALE Chains (default: `10000`)
+- `--port` - public port, the beginning of the port range for node SKALE Chains (default: `10000`)
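+
+For example (node name, IP, and domain are placeholders):
+
+```shell
+skale node register --name my-node --ip 203.0.113.10 -d node.example.com --port 10000
+```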
#### Node update
@@ -180,7 +182,7 @@ skale node update [ENV_FILEPATH]
Options:
-- `--yes` - update without additional confirmation
+- `--yes` - update without additional confirmation
Arguments:
@@ -199,8 +201,8 @@ skale node turn-off
Options:
-- `--maintenance-on` - set SKALE node into maintenance mode before turning off
-- `--yes` - turn off without additional confirmation
+- `--maintenance-on` - set SKALE node into maintenance mode before turning off
+- `--yes` - turn off without additional confirmation
#### Node turn-on
@@ -212,8 +214,8 @@ skale node turn-on [ENV_FILEPATH]
Options:
-- `--maintenance-off` - turn off maintenance mode after turning on the node
-- `--yes` - turn on without additional confirmation
+- `--maintenance-off` - turn off maintenance mode after turning on the node
+- `--yes` - turn on without additional confirmation
Arguments:
@@ -232,7 +234,7 @@ skale node maintenance-on
Options:
-- `--yes` - set without additional confirmation
+- `--yes` - set without additional confirmation
Switch off maintenance mode
@@ -251,7 +253,7 @@ skale node set-domain
Options:
- `--domain`/`-d` - SKALE node domain name
-- `--yes` - set without additional confirmation
+- `--yes` - set without additional confirmation
### Wallet commands
@@ -287,8 +289,8 @@ skale wallet send [ADDRESS] [AMOUNT]
Arguments:
-- `ADDRESS` - Ethereum receiver address
-- `AMOUNT` - Amount of ETH tokens to send
+- `ADDRESS` - Ethereum receiver address
+- `AMOUNT` - Amount of ETH tokens to send
Optional arguments:
@@ -330,7 +332,7 @@ skale schains info SCHAIN_NAME
Options:
-- `--json` - Show info in JSON format
+- `--json` - Show info in JSON format
#### SKALE Chain repair
@@ -354,7 +356,7 @@ skale health containers
Options:
-- `-a/--all` - list all containers (by default - only running)
+- `-a/--all` - list all containers (only running containers by default)
#### sChains healthchecks
@@ -366,7 +368,7 @@ skale health schains
Options:
-- `--json` - Show data in JSON format
+- `--json` - Show data in JSON format
#### SGX
@@ -407,16 +409,15 @@ skale ssl upload
##### Options
-- `-c/--cert-path` - Path to the certificate file
-- `-k/--key-path` - Path to the key file
-- `-f/--force` - Overwrite existing certificates
+- `-c/--cert-path` - Path to the certificate file
+- `-k/--key-path` - Path to the key file
+- `-f/--force` - Overwrite existing certificates
Admin API URL: \[GET] `/api/ssl/upload`
-
#### Check ssl certificate
-Check ssl certificate be connecting to healthcheck ssl server
+Check SSL certificate by connecting to the health-check SSL server
```shell
skale ssl check
@@ -424,11 +425,11 @@ skale ssl check
##### Options
-- `-c/--cert-path` - Path to the certificate file (default: uploaded using `skale ssl upload` certificate)
-- `-k/--key-path` - Path to the key file (default: uploaded using `skale ssl upload` key)
-- `--type/-t` - Check type (`openssl` - openssl cli check, `skaled` - skaled-based check, `all` - both)
-- `--port/-p` - Port to start healthcheck server (defualt: `4536`)
-- `--no-client` - Skip client connection (only make sure server started without errors)
+- `-c/--cert-path` - Path to the certificate file (default: the certificate uploaded via `skale ssl upload`)
+- `-k/--key-path` - Path to the key file (default: the key uploaded via `skale ssl upload`)
+- `--type/-t` - Check type (`openssl` - openssl cli check, `skaled` - skaled-based check, `all` - both)
+- `--port/-p` - Port to start healthcheck server (default: `4536`)
+- `--no-client` - Skip client connection (only verify that the server started without errors)
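+
+For example, to run only the openssl-based check against the default health-check port (a sketch built from the options above):
+
+```shell
+skale ssl check --type openssl --port 4536
+```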
### Logs commands
@@ -444,7 +445,7 @@ skale logs cli
Options:
-- `--debug` - show debug logs; more detailed output
+- `--debug` - show debug logs; more detailed output
#### Dump Logs
@@ -456,8 +457,7 @@ skale logs dump [PATH]
Optional arguments:
-- `--container`, `-c` - Dump logs only from specified container
-
+- `--container`, `-c` - Dump logs only from specified container
### Resources allocation commands
@@ -470,6 +470,7 @@ Show resources allocation file:
```shell
skale resources-allocation show
```
+
#### Generate/update
Generate/update allocation file:
@@ -484,28 +485,13 @@ Arguments:
Options:
-- `--yes` - generate without additional confirmation
-- `-f/--force` - rewrite allocation file if it exists
-
-### Validate commands
-
-> Prefix: `skale validate`
-
-#### Validate abi
-
-Check whether ABI files contain valid JSON data
-
-```shell
-skale validate abi
-```
-
-Options:
-
-- `--json` - show validation result in json format
-
+- `--yes` - generate without additional confirmation
+- `-f/--force` - rewrite allocation file if it exists
## Sync CLI usage
+A sync node is a node dedicated to synchronizing a single sChain.
+
### Top level commands sync
#### Info
@@ -526,7 +512,7 @@ skale version
Options:
-- `--short` - prints version only, without additional text.
+- `--short` - prints version only, without additional text.
### Sync node commands
@@ -546,20 +532,22 @@ Arguments:
You should specify the following environment variables:
-- `DISK_MOUNTPOINT` - disk mount point for storing sChains data
-- `DOCKER_LVMPY_STREAM` - stream of `docker-lvmpy` to use
-- `CONTAINER_CONFIGS_STREAM` - stream of `skale-node` to use
-- `ENDPOINT` - RPC endpoint of the node in the network where SKALE Manager is deployed
-- `MANAGER_CONTRACTS_ABI_URL` - URL to SKALE Manager contracts ABI and addresses
-- `IMA_CONTRACTS_ABI_URL` - URL to IMA contracts ABI and addresses
-- `SCHAIN_NAME` - name of the SKALE chain to sync
-- `ENV_TYPE` - environement type (mainnet, testnet, etc)
-
+- `DISK_MOUNTPOINT` - disk mount point for storing sChains data
+- `DOCKER_LVMPY_STREAM` - stream of `docker-lvmpy` to use
+- `CONTAINER_CONFIGS_STREAM` - stream of `skale-node` to use
+- `ENDPOINT` - RPC endpoint of the node in the network where SKALE Manager is deployed
+- `MANAGER_CONTRACTS` - SKALE Manager main contract alias or address
+- `IMA_CONTRACTS` - IMA main contract alias or address
+- `SCHAIN_NAME` - name of the SKALE chain to sync
+- `ENV_TYPE` - environment type (e.g., 'mainnet', 'testnet', 'qanet', 'devnet')
+
+> In the `MANAGER_CONTRACTS` and `IMA_CONTRACTS` fields, if you are using a recognized network (e.g., 'Mainnet', 'Holesky', 'local'), you can use a recognized alias (e.g., 'production', 'grants'). The list of recognized networks and aliases is in [contract deployments](https://github.com/skalenetwork/skale-contracts/tree/deployments).
+> :warning: If you are using a custom network or a contract that isn't recognized by the underlying SKALE library, you **MUST** provide a direct contract address.
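+
+A minimal sync-node `.env` sketch (all values are placeholders):
+
+```shell
+SCHAIN_NAME=my-schain
+CONTAINER_CONFIGS_STREAM=master
+ENDPOINT=https://eth-rpc.example.com
+MANAGER_CONTRACTS=production
+IMA_CONTRACTS=production
+DISK_MOUNTPOINT=/dev/xvdd
+DOCKER_LVMPY_STREAM=master
+ENV_TYPE=mainnet
+```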
Options:
-- `--archive` - Run sync node in an archive node (disable block rotation)
-- `--historic-state` - Enable historic state (works only in pair with --archive flag)
+- `--archive` - Run the sync node as an archive node (disables block rotation)
+- `--historic-state` - Enable historic state (works only together with the `--archive` flag)
#### Sync node update
@@ -585,12 +573,12 @@ Exit codes conventions for SKALE CLI tools
- `0` - Everything is OK
- `1` - General error exit code
-- `3` - Bad API response**
-- `4` - Script execution error**
-- `5` - Transaction error*
-- `6` - Revert error*
-- `7` - Bad user error**
-- `8` - Node state error**
+- `3` - Bad API response\*\*
+- `4` - Script execution error\*\*
+- `5` - Transaction error\*
+- `6` - Revert error\*
+- `7` - Bad user error\*\*
+- `8` - Node state error\*\*
`*` - `validator-cli` only
`**` - `node-cli` only
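+
+In scripts you can branch on these codes; a sketch (which codes a particular command can return depends on the command):
+
+```shell
+skale node info
+code=$?
+if [ "$code" -ne 0 ]; then
+  echo "skale command failed with exit code $code"
+fi
+```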
@@ -605,6 +593,12 @@ Exit codes conventions for SKALE CLI tools
pip install -e .[dev]
```
+#### Generate info.py locally
+
+```shell
+./scripts/generate_info.sh 1.0.0 my-branch normal
+```
+
##### Add flake8 git hook
In file `.git/hooks/pre-commit` add:
@@ -612,6 +606,7 @@ In file `.git/hooks/pre-commit` add:
```shell
#!/bin/sh
flake8 .
+ruff check
```
### Debugging
@@ -622,15 +617,6 @@ Run commands in dev mode:
ENV=dev python main.py YOUR_COMMAND
```
-### Setting up Travis
-
-Required environment variables:
-
-- `ACCESS_KEY_ID` - DO Spaces/AWS S3 API Key ID
-- `SECRET_ACCESS_KEY` - DO Spaces/AWS S3 Secret access key
-- `GITHUB_EMAIL` - Email of GitHub user
-- `GITHUB_OAUTH_TOKEN` - GitHub auth token
-
## Contributing
**If you have any questions please ask our development community on [Discord](https://discord.gg/vvUtWJB).**
diff --git a/node_cli/cli/node.py b/node_cli/cli/node.py
index 8eee2d96..ebddc5c8 100644
--- a/node_cli/cli/node.py
+++ b/node_cli/cli/node.py
@@ -33,17 +33,12 @@
turn_on,
get_node_info,
set_domain_name,
- run_checks
+ run_checks,
)
from node_cli.configs import DEFAULT_NODE_BASE_PORT
from node_cli.configs.env import ALLOWED_ENV_TYPES
from node_cli.utils.decorators import check_inited
-from node_cli.utils.helper import (
- abort_if_false,
- safe_load_texts,
- streamed_cmd,
- IP_TYPE
-)
+from node_cli.utils.helper import abort_if_false, safe_load_texts, streamed_cmd, IP_TYPE
from node_cli.utils.meta import get_meta_info
from node_cli.utils.print_formatters import print_meta_info
@@ -56,48 +51,35 @@ def node_cli():
pass
-@node_cli.group(help="SKALE node commands")
+@node_cli.group(help='SKALE node commands')
def node():
pass
-@node.command('info', help="Get info about SKALE node")
+@node.command('info', help='Get info about SKALE node')
@click.option('--format', '-f', type=click.Choice(['json', 'text']))
def node_info(format):
get_node_info(format)
-@node.command('register', help="Register current node in the SKALE Manager")
-@click.option(
- '--name', '-n',
- required=True,
- prompt="Enter node name",
- help='SKALE node name'
-)
+@node.command('register', help='Register current node in the SKALE Manager')
+@click.option('--name', '-n', required=True, prompt='Enter node name', help='SKALE node name')
@click.option(
'--ip',
- prompt="Enter node public IP",
+ prompt='Enter node public IP',
type=IP_TYPE,
- help='Public IP for RPC connections & consensus (required)'
+ help='Public IP for RPC connections & consensus (required)',
)
@click.option(
- '--port', '-p',
- default=DEFAULT_NODE_BASE_PORT,
- type=int,
- help='Base port for node sChains'
-)
-@click.option(
- '--domain', '-d',
- prompt="Enter node domain name",
- type=str,
- help='Node domain name'
+ '--port', '-p', default=DEFAULT_NODE_BASE_PORT, type=int, help='Base port for node sChains'
)
+@click.option('--domain', '-d', prompt='Enter node domain name', type=str, help='Node domain name')
@streamed_cmd
def register_node(name, ip, port, domain):
register(name, ip, ip, port, domain)
-@node.command('init', help="Initialize SKALE node")
+@node.command('init', help='Initialize SKALE node')
@click.argument('env_file')
@streamed_cmd
def init_node(env_file):
@@ -105,17 +87,15 @@ def init_node(env_file):
@node.command('update', help='Update node from .env file')
-@click.option('--yes', is_flag=True, callback=abort_if_false,
- expose_value=False,
- prompt='Are you sure you want to update SKALE node software?')
-@click.option('--pull-config', 'pull_config_for_schain', hidden=True, type=str)
@click.option(
- '--unsafe',
- 'unsafe_ok',
- help='Allow unsafe update',
- hidden=True,
- is_flag=True
+ '--yes',
+ is_flag=True,
+ callback=abort_if_false,
+ expose_value=False,
+ prompt='Are you sure you want to update SKALE node software?',
)
+@click.option('--pull-config', 'pull_config_for_schain', hidden=True, type=str)
+@click.option('--unsafe', 'unsafe_ok', help='Allow unsafe update', hidden=True, is_flag=True)
@click.argument('env_file')
@streamed_cmd
def update_node(env_file, pull_config_for_schain, unsafe_ok):
@@ -129,43 +109,44 @@ def signature(validator_id):
print(f'Signature: {res}')
-@node.command('backup', help="Generate backup file to restore SKALE node on another machine")
+@node.command('backup', help='Generate backup file to restore SKALE node on another machine')
@click.argument('backup_folder_path')
@streamed_cmd
def backup_node(backup_folder_path):
backup(backup_folder_path)
-@node.command('restore', help="Restore SKALE node on another machine")
+@node.command('restore', help='Restore SKALE node on another machine')
@click.argument('backup_path')
@click.argument('env_file')
@click.option(
- '--no-snapshot',
- help='Do not restore sChains from snapshot',
- is_flag=True,
- hidden=True
+ '--no-snapshot', help='Do not restore sChains from snapshot', is_flag=True, hidden=True
)
@click.option(
'--config-only',
help='Only restore configuration files in .skale and artifacts',
is_flag=True,
- hidden=True
+ hidden=True,
)
@streamed_cmd
def restore_node(backup_path, env_file, no_snapshot, config_only):
restore(backup_path, env_file, no_snapshot, config_only)
-@node.command('maintenance-on', help="Set SKALE node into maintenance mode")
-@click.option('--yes', is_flag=True, callback=abort_if_false,
- expose_value=False,
- prompt='Are you sure you want to set SKALE node into maintenance mode?')
+@node.command('maintenance-on', help='Set SKALE node into maintenance mode')
+@click.option(
+ '--yes',
+ is_flag=True,
+ callback=abort_if_false,
+ expose_value=False,
+ prompt='Are you sure you want to set SKALE node into maintenance mode?',
+)
@streamed_cmd
def set_node_in_maintenance():
set_maintenance_mode_on()
-@node.command('maintenance-off', help="Remove SKALE node from maintenance mode")
+@node.command('maintenance-off', help='Remove SKALE node from maintenance mode')
@streamed_cmd
def remove_node_from_maintenance():
set_maintenance_mode_off()
@@ -173,20 +154,16 @@ def remove_node_from_maintenance():
@node.command('turn-off', help='Turn off the node')
@click.option(
- '--maintenance-on',
- help='Set SKALE node into maintenance mode before turning off',
- is_flag=True
+ '--maintenance-on', help='Set SKALE node into maintenance mode before turning off', is_flag=True
)
-@click.option('--yes', is_flag=True, callback=abort_if_false,
- expose_value=False,
- prompt='Are you sure you want to turn off the node?')
@click.option(
- '--unsafe',
- 'unsafe_ok',
- help='Allow unsafe turn-off',
- hidden=True,
- is_flag=True
+ '--yes',
+ is_flag=True,
+ callback=abort_if_false,
+ expose_value=False,
+ prompt='Are you sure you want to turn off the node?',
)
+@click.option('--unsafe', 'unsafe_ok', help='Allow unsafe turn-off', hidden=True, is_flag=True)
@streamed_cmd
def _turn_off(maintenance_on, unsafe_ok):
turn_off(maintenance_on, unsafe_ok)
@@ -194,35 +171,36 @@ def _turn_off(maintenance_on, unsafe_ok):
@node.command('turn-on', help='Turn on the node')
@click.option(
- '--maintenance-off',
- help='Turn off maintenance mode after turning on the node',
- is_flag=True
+ '--maintenance-off', help='Turn off maintenance mode after turning on the node', is_flag=True
)
@click.option(
'--sync-schains',
help='Run all sChains in the snapshot download mode',
is_flag=True,
- hidden=True
+ hidden=True,
+)
+@click.option(
+ '--yes',
+ is_flag=True,
+ callback=abort_if_false,
+ expose_value=False,
+ prompt='Are you sure you want to turn on the node?',
)
-@click.option('--yes', is_flag=True, callback=abort_if_false,
- expose_value=False,
- prompt='Are you sure you want to turn on the node?')
@click.argument('env_file')
@streamed_cmd
def _turn_on(maintenance_off, sync_schains, env_file):
turn_on(maintenance_off, sync_schains, env_file)
-@node.command('set-domain', help="Set node domain name")
+@node.command('set-domain', help='Set node domain name')
+@click.option('--domain', '-d', prompt='Enter node domain name', type=str, help='Node domain name')
@click.option(
- '--domain', '-d',
- prompt="Enter node domain name",
- type=str,
- help='Node domain name'
+ '--yes',
+ is_flag=True,
+ callback=abort_if_false,
+ expose_value=False,
+ prompt='Are you sure you want to set domain name?',
)
-@click.option('--yes', is_flag=True, callback=abort_if_false,
- expose_value=False,
- prompt='Are you sure you want to set domain name?')
@streamed_cmd
def _set_domain_name(domain):
set_domain_name(domain)
@@ -230,10 +208,11 @@ def _set_domain_name(domain):
@node.command(help='Check if node meet network requirements')
@click.option(
- '--network', '-n',
+ '--network',
+ '-n',
type=click.Choice(ALLOWED_ENV_TYPES),
default='mainnet',
- help='Network to check'
+ help='Network to check',
)
def check(network):
run_checks(network)
@@ -241,21 +220,20 @@ def check(network):
@node.command(help='Reconfigure nftables rules')
@click.option('--monitoring', is_flag=True)
-@click.option('--yes', is_flag=True, callback=abort_if_false,
- expose_value=False,
- prompt='Are you sure you want to reconfigure firewall rules?')
+@click.option(
+ '--yes',
+ is_flag=True,
+ callback=abort_if_false,
+ expose_value=False,
+ prompt='Are you sure you want to reconfigure firewall rules?',
+)
def configure_firewall(monitoring):
configure_firewall_rules(enable_monitoring=monitoring)
@node.command(help='Show node version information')
@check_inited
-@click.option(
- '--json',
- 'raw',
- is_flag=True,
- help=TEXTS['common']['json']
-)
+@click.option('--json', 'raw', is_flag=True, help=TEXTS['common']['json'])
def version(raw: bool) -> None:
meta_info = get_meta_info(raw=raw)
if raw:
diff --git a/node_cli/cli/validate.py b/node_cli/cli/validate.py
deleted file mode 100644
index f8134df9..00000000
--- a/node_cli/cli/validate.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# This file is part of node-cli
-#
-# Copyright (C) 2019 SKALE Labs
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Affero General Public License for more details.
-#
-# You should have received a copy of the GNU Affero General Public License
-# along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-import click
-
-from node_cli.core.host import validate_abi_files
-
-
-@click.group()
-def validate_cli():
- pass
-
-
-@validate_cli.group(help="Validation commands")
-def validate():
- pass
-
-
-@validate.command('abi', help="Validate contracts abi")
-@click.option(
- '--json',
- 'json_format',
- help='Show result in JSON format',
- is_flag=True
-)
-def abi(json_format):
- validate_abi_files(json_result=json_format)
diff --git a/node_cli/configs/__init__.py b/node_cli/configs/__init__.py
index 19d9e000..36363253 100644
--- a/node_cli/configs/__init__.py
+++ b/node_cli/configs/__init__.py
@@ -24,8 +24,7 @@
GLOBAL_SKALE_DIR = os.getenv('GLOBAL_SKALE_DIR') or '/etc/skale'
GLOBAL_SKALE_CONF_FILENAME = 'conf.json'
-GLOBAL_SKALE_CONF_FILEPATH = os.path.join(
- GLOBAL_SKALE_DIR, GLOBAL_SKALE_CONF_FILENAME)
+GLOBAL_SKALE_CONF_FILEPATH = os.path.join(GLOBAL_SKALE_DIR, GLOBAL_SKALE_CONF_FILENAME)
GLOBAL_CONFIG = read_g_config(GLOBAL_SKALE_DIR, GLOBAL_SKALE_CONF_FILEPATH)
G_CONF_USER = GLOBAL_CONFIG['user']
@@ -65,8 +64,7 @@
LOG_PATH = os.path.join(NODE_DATA_PATH, 'log')
REMOVED_CONTAINERS_FOLDER_NAME = '.removed_containers'
-REMOVED_CONTAINERS_FOLDER_PATH = os.path.join(
- LOG_PATH, REMOVED_CONTAINERS_FOLDER_NAME)
+REMOVED_CONTAINERS_FOLDER_PATH = os.path.join(LOG_PATH, REMOVED_CONTAINERS_FOLDER_NAME)
ETH_STATE_PATH = os.path.join(NODE_DATA_PATH, 'eth-state')
NODE_CERTS_PATH = os.path.join(NODE_DATA_PATH, 'ssl')
@@ -105,7 +103,7 @@
def _get_env():
try:
- sys._MEIPASS
+ sys._MEIPASS # type: ignore
except AttributeError:
return 'dev'
return 'prod'
@@ -118,7 +116,7 @@ def _get_env():
PARDIR = os.path.join(CURRENT_FILE_LOCATION, os.pardir)
PROJECT_DIR = os.path.join(PARDIR, os.pardir)
else:
- PARDIR = os.path.join(sys._MEIPASS, 'data')
+ PARDIR = os.path.join(sys._MEIPASS, 'data') # type: ignore
PROJECT_DIR = PARDIR
TEXT_FILE = os.path.join(PROJECT_DIR, 'text.yml')
@@ -141,9 +139,6 @@ def _get_env():
TM_INIT_TIMEOUT = 20
RESTORE_SLEEP_TIMEOUT = 20
-MANAGER_CONTRACTS_FILEPATH = os.path.join(CONTRACTS_PATH, 'manager.json')
-IMA_CONTRACTS_FILEPATH = os.path.join(CONTRACTS_PATH, 'ima.json')
-
META_FILEPATH = os.path.join(NODE_DATA_PATH, 'meta.json')
SKALE_NODE_REPO_URL = 'https://github.com/skalenetwork/skale-node.git'
diff --git a/node_cli/configs/env.py b/node_cli/configs/env.py
index 7b6bf116..3b905885 100644
--- a/node_cli/configs/env.py
+++ b/node_cli/configs/env.py
@@ -1,19 +1,37 @@
+"""Environment configuration and validation module for SKALE node.
+
+This module handles environment variable loading, validation, and configuration
+for SKALE node setup. It ensures all required parameters are present and valid.
+"""
+
import os
+from typing import Dict, List, Optional
from dotenv import load_dotenv
-from node_cli.configs import SKALE_DIR, CONTAINER_CONFIG_PATH
+import requests
+from enum import Enum
+from node_cli.configs import SKALE_DIR, CONTAINER_CONFIG_PATH
+from node_cli.utils.helper import error_exit, is_contract_address
+from node_cli.utils.exit_codes import CLIExitCodes
SKALE_DIR_ENV_FILEPATH = os.path.join(SKALE_DIR, '.env')
CONFIGS_ENV_FILEPATH = os.path.join(CONTAINER_CONFIG_PATH, '.env')
-
ALLOWED_ENV_TYPES = ['mainnet', 'testnet', 'qanet', 'devnet']
-REQUIRED_PARAMS = {
+
+class ContractType(Enum):
+ """Contract types supported by the system with skale-contracts integration."""
+
+ IMA = 'mainnet-ima'
+ MANAGER = 'skale-manager'
+
+
+REQUIRED_PARAMS: Dict[str, str] = {
'CONTAINER_CONFIGS_STREAM': '',
'ENDPOINT': '',
- 'MANAGER_CONTRACTS_ABI_URL': '',
- 'IMA_CONTRACTS_ABI_URL': '',
+ 'MANAGER_CONTRACTS': '',
+ 'IMA_CONTRACTS': '',
'FILEBEAT_HOST': '',
'DISK_MOUNTPOINT': '',
'SGX_SERVER_URL': '',
@@ -21,18 +39,18 @@
'ENV_TYPE': '',
}
-REQUIRED_PARAMS_SYNC = {
+REQUIRED_PARAMS_SYNC: Dict[str, str] = {
'SCHAIN_NAME': '',
'CONTAINER_CONFIGS_STREAM': '',
'ENDPOINT': '',
- 'MANAGER_CONTRACTS_ABI_URL': '',
- 'IMA_CONTRACTS_ABI_URL': '',
+ 'MANAGER_CONTRACTS': '',
+ 'IMA_CONTRACTS': '',
'DISK_MOUNTPOINT': '',
'DOCKER_LVMPY_STREAM': '',
- 'ENV_TYPE': ''
+ 'ENV_TYPE': '',
}
-OPTIONAL_PARAMS = {
+OPTIONAL_PARAMS: Dict[str, str] = {
'MONITORING_CONTAINERS': '',
'TELEGRAF': '',
'INFLUX_TOKEN': '',
@@ -45,36 +63,170 @@
'DEFAULT_GAS_PRICE_WEI': '',
'SKIP_DOCKER_CONFIG': '',
'ENFORCE_BTRFS': '',
- 'SKIP_DOCKER_CLEANUP': ''
+ 'SKIP_DOCKER_CLEANUP': '',
}
-def absent_params(params):
- return list(filter(
- lambda key: key not in OPTIONAL_PARAMS and not params[key],
- params)
- )
+def absent_params(params: Dict[str, str]) -> List[str]:
+ """Return a list of required parameters that are missing or empty."""
+ return [key for key in params if key not in OPTIONAL_PARAMS and not params[key]]
+
+def get_env_config(
+ env_filepath: str = SKALE_DIR_ENV_FILEPATH, sync_node: bool = False
+) -> Dict[str, str]:
+ """Load and validate environment configuration from a file."""
+ load_env_file(env_filepath)
+ params = build_params(sync_node)
+ populate_params(params)
+ validate_params(params)
+ return params
-def get_env_config(env_filepath: str = SKALE_DIR_ENV_FILEPATH, sync_node: bool = False):
- load_dotenv(dotenv_path=env_filepath)
+
+def load_env_file(env_filepath: str) -> None:
+ """Check and load environment variables from the given file."""
+ if not os.path.exists(env_filepath):
+ error_exit(f'Environment file not found: {env_filepath}', CLIExitCodes.FAILURE)
+ if not os.access(env_filepath, os.R_OK):
+ error_exit(f'Cannot read environment file: {env_filepath}', CLIExitCodes.FAILURE)
+ if not load_dotenv(dotenv_path=env_filepath):
+ error_exit(f'Failed to load environment from {env_filepath}', CLIExitCodes.FAILURE)
+
+
+def build_params(sync_node: bool = False) -> Dict[str, str]:
+ """Return a dictionary of parameters based on node type."""
params = REQUIRED_PARAMS_SYNC.copy() if sync_node else REQUIRED_PARAMS.copy()
params.update(OPTIONAL_PARAMS)
- for option_name in params:
- env_param = os.getenv(option_name)
- if env_param is not None:
- params[option_name] = str(env_param)
- validate_params(params)
return params
-def validate_params(params): # todo: temporary fix
- if params['ENV_TYPE'] not in ALLOWED_ENV_TYPES:
- raise NotValidEnvParamsError(
- f'Allowed ENV_TYPE values are {ALLOWED_ENV_TYPES}. '
- f'Actual: "{params["ENV_TYPE"]}"'
+def populate_params(params: Dict[str, str]) -> None:
+ """Populate params dictionary with environment variable values."""
+ for key in params:
+ env_value = os.getenv(key)
+ if env_value is not None:
+ params[key] = str(env_value)
+
+
+def validate_params(params: Dict[str, str]) -> None:
+ """Validate environment parameters."""
+ missing = absent_params(params)
+ if missing:
+ error_exit(f'Missing required parameters: {missing}', CLIExitCodes.FAILURE)
+ validate_env_type(params['ENV_TYPE'])
+ # Get the endpoint explicitly from the params.
+ endpoint = params['ENDPOINT']
+ validate_env_alias_or_address(params['IMA_CONTRACTS'], ContractType.IMA, endpoint)
+ validate_env_alias_or_address(params['MANAGER_CONTRACTS'], ContractType.MANAGER, endpoint)
+
+
+def validate_env_type(env_type: str) -> None:
+ """Validate the environment type."""
+ if env_type not in ALLOWED_ENV_TYPES:
+ error_exit(
+ f'Allowed ENV_TYPE values are {ALLOWED_ENV_TYPES}. Actual: "{env_type}"',
+ CLIExitCodes.FAILURE,
+ )
+
+
+def validate_env_alias_or_address(
+ alias_or_address: str, contract_type: ContractType, endpoint: str
+) -> None:
+ """Validate contract alias or address."""
+ if not alias_or_address:
+ param_name = 'IMA_CONTRACTS' if contract_type == ContractType.IMA else 'MANAGER_CONTRACTS'
+ error_exit(f'{param_name} is not set', CLIExitCodes.FAILURE)
+ if is_contract_address(alias_or_address):
+ validate_contract_address(alias_or_address, endpoint)
+ else:
+ validate_contract_alias(alias_or_address, contract_type, endpoint)
+
+
+def validate_contract_address(contract_address: str, endpoint: str) -> None:
+ """Validate if the given contract address has deployed code."""
+ try:
+ response = requests.post(
+ endpoint,
+ json={
+ 'jsonrpc': '2.0',
+ 'method': 'eth_getCode',
+ 'params': [contract_address, 'latest'],
+ 'id': 1,
+ },
)
+ if response.status_code != 200:
+ error_exit(
+ f'Failed to verify contract at address {contract_address}', CLIExitCodes.FAILURE
+ )
+ result = response.json().get('result')
+ if not result or result in ['0x', '0x0']:
+ error_exit(
+ f'No contract code found at address {contract_address}', CLIExitCodes.FAILURE
+ )
+ except requests.RequestException as e:
+ error_exit(f'Failed to validate contract address: {str(e)}', CLIExitCodes.FAILURE)
+
+
+def validate_contract_alias(alias: str, contract_type: ContractType, endpoint: str) -> None:
+ """Validate if the given contract alias exists in deployments for the current network."""
+ try:
+ chain_id = get_chain_id(endpoint)
+ metadata = get_network_metadata()
+ networks = metadata.get('networks', [])
+ network_path: Optional[str] = None
+ for net in networks:
+ if net.get('chainId') == chain_id:
+ network_path = net.get('path')
+ break
+ if not network_path:
+ error_exit(
+ f'Network with chain ID {chain_id} not found in metadata', CLIExitCodes.FAILURE
+ )
+ deployment_url = (
+ f'https://raw.githubusercontent.com/skalenetwork/skale-contracts/'
+ f'refs/heads/deployments/{network_path}/{contract_type.value}/{alias}.json'
+ )
+ if requests.get(deployment_url).status_code != 200:
+ error_exit(
+ f"Contract alias '{alias}' not found for {contract_type.value}",
+ CLIExitCodes.FAILURE,
+ )
+ except requests.RequestException as e:
+ error_exit(f"Failed to validate contract alias '{alias}': {str(e)}", CLIExitCodes.FAILURE)
+
+
+def get_chain_id(endpoint: str) -> int:
+ """Fetch chain ID from the JSON-RPC endpoint."""
+ try:
+ response = requests.post(
+ endpoint,
+ json={'jsonrpc': '2.0', 'method': 'eth_chainId', 'params': [], 'id': 1},
+ )
+ if response.status_code != 200:
+ error_exit('Failed to get chain ID from endpoint', CLIExitCodes.FAILURE)
+ return int(response.json()['result'], 16)
+ except requests.RequestException as e:
+ error_exit(f'Failed to get chain ID: {str(e)}', CLIExitCodes.FAILURE)
+ # Will never reach this line, but needed for type checking.
+ return 0
+
+
+def get_network_metadata() -> Dict:
+ """Fetch network metadata from GitHub."""
+ metadata_url = (
+ 'https://raw.githubusercontent.com/skalenetwork/skale-contracts/'
+ 'refs/heads/deployments/metadata.json'
+ )
+ try:
+ response = requests.get(metadata_url)
+ if response.status_code != 200:
+ error_exit('Failed to fetch networks metadata', CLIExitCodes.FAILURE)
+ return response.json()
+ except requests.RequestException as e:
+ error_exit(f'Failed to fetch networks metadata: {str(e)}', CLIExitCodes.FAILURE)
+ # Will never reach this line, but needed for type checking.
+ return {}
class NotValidEnvParamsError(Exception):
- """Raised when something is wrong with provided env params"""
+ """Raised when environment parameters are invalid or missing."""
diff --git a/node_cli/core/host.py b/node_cli/core/host.py
index 0456ac89..d66c1a54 100644
--- a/node_cli/core/host.py
+++ b/node_cli/core/host.py
@@ -17,36 +17,45 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
-import json
import logging
import os
from shutil import copyfile
from urllib.parse import urlparse
from node_cli.core.resources import update_resource_allocation
+from node_cli.utils.helper import error_exit
+from node_cli.utils.exit_codes import CLIExitCodes
from node_cli.configs import (
- ADMIN_PORT, AUTOLOAD_KERNEL_MODULES_PATH,
- BTRFS_KERNEL_MODULE, DEFAULT_URL_SCHEME, NODE_DATA_PATH,
- SKALE_DIR, CONTAINER_CONFIG_PATH, CONTRACTS_PATH,
- ETH_STATE_PATH, NODE_CERTS_PATH, SGX_CERTS_PATH,
- REPORTS_PATH, REDIS_DATA_PATH,
- SCHAINS_DATA_PATH, LOG_PATH,
+ ADMIN_PORT,
+ AUTOLOAD_KERNEL_MODULES_PATH,
+ BTRFS_KERNEL_MODULE,
+ DEFAULT_URL_SCHEME,
+ NODE_DATA_PATH,
+ SKALE_DIR,
+ CONTAINER_CONFIG_PATH,
+ CONTRACTS_PATH,
+ ETH_STATE_PATH,
+ NODE_CERTS_PATH,
+ SGX_CERTS_PATH,
+ REPORTS_PATH,
+ REDIS_DATA_PATH,
+ SCHAINS_DATA_PATH,
+ LOG_PATH,
REMOVED_CONTAINERS_FOLDER_PATH,
- IMA_CONTRACTS_FILEPATH, MANAGER_CONTRACTS_FILEPATH,
- SKALE_RUN_DIR, SKALE_STATE_DIR, SKALE_TMP_DIR,
- UFW_CONFIG_PATH, UFW_IPV6_BEFORE_INPUT_CHAIN
-)
-from node_cli.configs.resource_allocation import (
- RESOURCE_ALLOCATION_FILEPATH
+ SKALE_RUN_DIR,
+ SKALE_STATE_DIR,
+ SKALE_TMP_DIR,
+ UFW_CONFIG_PATH,
+ UFW_IPV6_BEFORE_INPUT_CHAIN,
)
+from node_cli.configs.resource_allocation import RESOURCE_ALLOCATION_FILEPATH
from node_cli.configs.cli_logger import LOG_DATA_PATH
from node_cli.configs.env import SKALE_DIR_ENV_FILEPATH, CONFIGS_ENV_FILEPATH
from node_cli.core.nftables import NFTablesManager
from node_cli.utils.helper import safe_mkdir
-from node_cli.utils.print_formatters import print_abi_validation_errors
-from node_cli.utils.helper import safe_load_texts, validate_abi
+from node_cli.utils.helper import safe_load_texts
TEXTS = safe_load_texts()
@@ -65,52 +74,91 @@ def fix_url(url):
return False
-def get_flask_secret_key():
+def get_flask_secret_key() -> str:
+ """Retrieve Flask secret key from filesystem."""
secret_key_filepath = os.path.join(NODE_DATA_PATH, 'flask_db_key.txt')
- with open(secret_key_filepath) as key_file:
- return key_file.read().strip()
+
+ if not os.path.exists(secret_key_filepath):
+ error_exit(
+ f'Flask secret key file not found at {secret_key_filepath}', CLIExitCodes.FAILURE
+ )
+
+ try:
+ with open(secret_key_filepath, 'r') as key_file:
+ secret_key = key_file.read().strip()
+ return secret_key
+ except (IOError, OSError) as e:
+ error_exit(f'Failed to read Flask secret key: {e}', CLIExitCodes.FAILURE)
+ # Will never reach here, but needed for type checking.
+ return ''
-def prepare_host(
- env_filepath: str,
- env_type: str,
- allocation: bool = False
-):
- logger.info('Preparing host started')
- make_dirs()
- save_env_params(env_filepath)
+def prepare_host(env_filepath: str, env_type: str, allocation: bool = False) -> None:
+ """Initialize SKALE node host environment."""
+ if not env_filepath or not env_type:
+ error_exit('Missing required parameters for host initialization', CLIExitCodes.FAILURE)
- if allocation:
- update_resource_allocation(env_type)
+ try:
+ logger.info('Preparing host started')
+ make_dirs()
+ save_env_params(env_filepath)
+ if allocation:
+ update_resource_allocation(env_type)
+ except Exception as e:
+ error_exit(f'Failed to prepare host: {str(e)}', CLIExitCodes.FAILURE)
-def is_node_inited():
- return os.path.isfile(RESOURCE_ALLOCATION_FILEPATH)
+
+def is_node_inited() -> bool:
+ """Check if the SKALE node has been initialized.
+
+ Determines initialization status by checking for existence of the
+ resource allocation file.
+ """
+ try:
+ # Check if resource allocation file exists as initialization indicator
+ return os.path.isfile(RESOURCE_ALLOCATION_FILEPATH)
+ except OSError as e:
+ logger.error(f'Error checking node initialization status: {e}')
+ return False
def make_dirs():
for dir_path in (
- SKALE_DIR, NODE_DATA_PATH, CONTAINER_CONFIG_PATH,
- CONTRACTS_PATH, ETH_STATE_PATH, NODE_CERTS_PATH,
- REMOVED_CONTAINERS_FOLDER_PATH,
- SGX_CERTS_PATH, SCHAINS_DATA_PATH, LOG_PATH,
- REPORTS_PATH, REDIS_DATA_PATH,
- SKALE_RUN_DIR, SKALE_STATE_DIR, SKALE_TMP_DIR
+ SKALE_DIR,
+ NODE_DATA_PATH,
+ CONTAINER_CONFIG_PATH,
+ CONTRACTS_PATH,
+ ETH_STATE_PATH,
+ NODE_CERTS_PATH,
+ REMOVED_CONTAINERS_FOLDER_PATH,
+ SGX_CERTS_PATH,
+ SCHAINS_DATA_PATH,
+ LOG_PATH,
+ REPORTS_PATH,
+ REDIS_DATA_PATH,
+ SKALE_RUN_DIR,
+ SKALE_STATE_DIR,
+ SKALE_TMP_DIR,
):
safe_mkdir(dir_path)
-def save_env_params(env_filepath):
- copyfile(env_filepath, SKALE_DIR_ENV_FILEPATH)
+def save_env_params(env_filepath: str) -> None:
+ """Copy environment parameters file to SKALE directory."""
+ if not os.path.isfile(env_filepath):
+ error_exit(f'Environment file not found: {env_filepath}', CLIExitCodes.FAILURE)
+ if not os.access(env_filepath, os.R_OK):
+ error_exit(f'Cannot read environment file: {env_filepath}', CLIExitCodes.FAILURE)
+ try:
+ copyfile(env_filepath, SKALE_DIR_ENV_FILEPATH)
+ except (IOError, OSError) as e:
+ error_exit(f'Failed to copy environment file: {e}', CLIExitCodes.FAILURE)
def link_env_file():
- if not (os.path.islink(CONFIGS_ENV_FILEPATH) or
- os.path.isfile(CONFIGS_ENV_FILEPATH)):
- logger.info(
- 'Creating symlink %s → %s',
- SKALE_DIR_ENV_FILEPATH, CONFIGS_ENV_FILEPATH
- )
+ if not (os.path.islink(CONFIGS_ENV_FILEPATH) or os.path.isfile(CONFIGS_ENV_FILEPATH)):
+ logger.info('Creating symlink %s → %s', SKALE_DIR_ENV_FILEPATH, CONFIGS_ENV_FILEPATH)
os.symlink(SKALE_DIR_ENV_FILEPATH, CONFIGS_ENV_FILEPATH)
@@ -130,10 +178,7 @@ def is_btrfs_module_autoloaded(modules_filepath=AUTOLOAD_KERNEL_MODULES_PATH):
modules = set(
map(
lambda line: line.strip(),
- filter(
- lambda line: not line.startswith('#'),
- modules_file.readlines()
- )
+ filter(lambda line: not line.startswith('#'), modules_file.readlines()),
)
)
return BTRFS_KERNEL_MODULE in modules
@@ -144,9 +189,7 @@ def add_btrfs_module_to_autoload(modules_filepath=AUTOLOAD_KERNEL_MODULES_PATH):
modules_file.write(f'{BTRFS_KERNEL_MODULE}\n')
-def ensure_btrfs_kernel_module_autoloaded(
- modules_filepath=AUTOLOAD_KERNEL_MODULES_PATH
-):
+def ensure_btrfs_kernel_module_autoloaded(modules_filepath=AUTOLOAD_KERNEL_MODULES_PATH):
logger.debug('Checking if btrfs is in %s', modules_filepath)
if not is_btrfs_module_autoloaded(modules_filepath):
logger.info('Adding btrfs module to %s', modules_filepath)
@@ -155,24 +198,6 @@ def ensure_btrfs_kernel_module_autoloaded(
logger.debug('btrfs is already in %s', modules_filepath)
-def validate_abi_files(json_result=False):
- results = [
- validate_abi(abi_filepath)
- for abi_filepath in [
- MANAGER_CONTRACTS_FILEPATH,
- IMA_CONTRACTS_FILEPATH
- ]
- ]
- if any(r['status'] == 'error' for r in results):
- print('Some files do not exist or are incorrect')
- print_abi_validation_errors(results, raw=json_result)
- else:
- if json_result:
- print(json.dumps({'result': 'ok'}))
- else:
- print('All abi files are correct json files!')
-
-
def is_ufw_ipv6_option_enabled() -> bool:
"""Check if UFW is enabled and IPv6 is configured."""
if os.path.isfile(UFW_CONFIG_PATH):
diff --git a/node_cli/core/node.py b/node_cli/core/node.py
index 0d5eeb72..e51209b0 100644
--- a/node_cli/core/node.py
+++ b/node_cli/core/node.py
@@ -68,7 +68,6 @@
error_exit,
get_request,
post_request,
- extract_env_params,
)
from node_cli.utils.meta import get_meta_info
from node_cli.utils.texts import Texts
@@ -140,8 +139,6 @@ def register_node(name, p2p_ip, public_ip, port, domain_name):
@check_not_inited
def init(env_filepath):
env = compose_node_env(env_filepath)
- if env is None:
- return
inited_ok = init_op(env_filepath, env)
if not inited_ok:
@@ -213,7 +210,7 @@ def update_sync(env_filepath: str, unsafe_ok: bool = False) -> None:
@check_inited
@check_user
def repair_sync(archive: bool, historic_state: bool, snapshot_from: str) -> None:
- env_params = extract_env_params(INIT_ENV_FILEPATH, sync_node=True)
+ env_params = get_env_config(INIT_ENV_FILEPATH, sync_node=True)
schain_name = env_params['SCHAIN_NAME']
repair_sync_op(
schain_name=schain_name,
@@ -225,21 +222,25 @@ def repair_sync(archive: bool, historic_state: bool, snapshot_from: str) -> None
def compose_node_env(
- env_filepath,
- inited_node=False,
- sync_schains=None,
- pull_config_for_schain=None,
- sync_node=False,
- save: bool = True
-):
+ env_filepath: Optional[str],
+ inited_node: bool = False,
+ sync_schains: Optional[bool] = None,
+ pull_config_for_schain: Optional[str] = None,
+ sync_node: bool = False,
+ save: bool = True,
+) -> dict:
+ """Compose environment variables dictionary for SKALE node."""
if env_filepath is not None:
- env_params = extract_env_params(env_filepath, sync_node=sync_node, raise_for_status=True)
+ env_params = get_env_config(env_filepath, sync_node=sync_node)
if save:
save_env_params(env_filepath)
else:
- env_params = extract_env_params(INIT_ENV_FILEPATH, sync_node=sync_node)
+ env_params = get_env_config(INIT_ENV_FILEPATH, sync_node=sync_node)
+ # Set mount directory based on node type
mnt_dir = SCHAINS_MNT_DIR_SYNC if sync_node else SCHAINS_MNT_DIR_REGULAR
+
+ # Compose base environment dictionary
env = {
'SKALE_DIR': SKALE_DIR,
'SCHAINS_MNT_DIR': mnt_dir,
@@ -247,13 +248,20 @@ def compose_node_env(
'SKALE_LIB_PATH': SKALE_STATE_DIR,
**env_params,
}
+
+ # Add Flask secret key for initialized non-sync nodes
if inited_node and not sync_node:
- flask_secret_key = get_flask_secret_key()
- env['FLASK_SECRET_KEY'] = flask_secret_key
+ env['FLASK_SECRET_KEY'] = get_flask_secret_key()
+
+ # Enable backup run for syncing schains
if sync_schains and not sync_node:
env['BACKUP_RUN'] = 'True'
+
+ # Add schain config pull parameter if specified
if pull_config_for_schain:
env['PULL_CONFIG_FOR_SCHAIN'] = pull_config_for_schain
+
+ # Remove empty values and return
return {k: v for k, v in env.items() if v != ''}
diff --git a/node_cli/core/resources.py b/node_cli/core/resources.py
index f47ef792..64467c23 100644
--- a/node_cli/core/resources.py
+++ b/node_cli/core/resources.py
@@ -24,16 +24,21 @@
import psutil
+from node_cli.configs.env import get_env_config
from node_cli.utils.docker_utils import ensure_volume
from node_cli.utils.schain_types import SchainTypes
-from node_cli.utils.helper import (
- write_json, read_json, run_cmd, extract_env_params, safe_load_yml
-)
+from node_cli.utils.helper import write_json, read_json, run_cmd, safe_load_yml
from node_cli.configs import ALLOCATION_FILEPATH, STATIC_PARAMS_FILEPATH, SNAPSHOTS_SHARED_VOLUME
from node_cli.configs.resource_allocation import (
- RESOURCE_ALLOCATION_FILEPATH, TIMES, TIMEOUT,
- TEST_DIVIDER, SMALL_DIVIDER, MEDIUM_DIVIDER, LARGE_DIVIDER,
- MEMORY_FACTOR, MAX_CPU_SHARES
+ RESOURCE_ALLOCATION_FILEPATH,
+ TIMES,
+ TIMEOUT,
+ TEST_DIVIDER,
+ SMALL_DIVIDER,
+ MEDIUM_DIVIDER,
+ LARGE_DIVIDER,
+ MEMORY_FACTOR,
+ MAX_CPU_SHARES,
)
logger = logging.getLogger(__name__)
@@ -50,7 +55,7 @@ def __init__(self, value, fractional=False):
'test': value / TEST_DIVIDER,
'small': value / SMALL_DIVIDER,
'medium': value / MEDIUM_DIVIDER,
- 'large': value / LARGE_DIVIDER
+ 'large': value / LARGE_DIVIDER,
}
if not fractional:
for k in self.values:
@@ -67,10 +72,7 @@ def get_resource_allocation_info():
return None
-def compose_resource_allocation_config(
- env_type: str,
- params_by_env_type: Dict = None
-) -> Dict:
+def compose_resource_allocation_config(env_type: str, params_by_env_type: Dict = None) -> Dict:
params_by_env_type = params_by_env_type or safe_load_yml(STATIC_PARAMS_FILEPATH)
common_config = params_by_env_type['common']
schain_cpu_alloc, ima_cpu_alloc = get_cpu_alloc(common_config)
@@ -83,12 +85,9 @@ def compose_resource_allocation_config(
'mem': schain_mem_alloc.dict(),
'disk': schain_allocation_data[env_type]['disk'],
'volume_limits': schain_allocation_data[env_type]['volume_limits'], # noqa
- 'leveldb_limits': schain_allocation_data[env_type]['leveldb_limits'] # noqa
+ 'leveldb_limits': schain_allocation_data[env_type]['leveldb_limits'], # noqa
},
- 'ima': {
- 'cpu_shares': ima_cpu_alloc.dict(),
- 'mem': ima_mem_alloc.dict()
- }
+ 'ima': {'cpu_shares': ima_cpu_alloc.dict(), 'mem': ima_mem_alloc.dict()},
}
@@ -98,22 +97,17 @@ def generate_resource_allocation_config(env_file, force=False) -> None:
logger.debug(msg)
print(msg)
return
- env_params = extract_env_params(env_file)
+ env_params = get_env_config(env_file)
if env_params is None:
return
logger.info('Generating resource allocation file ...')
try:
- update_resource_allocation(
- env_params['ENV_TYPE']
- )
+ update_resource_allocation(env_params['ENV_TYPE'])
except Exception as e:
logger.exception(e)
- print('Can\'t generate resource allocation file, check out CLI logs')
+ print("Can't generate resource allocation file, check out CLI logs")
else:
- print(
- f'Resource allocation file generated: '
- f'{RESOURCE_ALLOCATION_FILEPATH}'
- )
+ print(f'Resource allocation file generated: {RESOURCE_ALLOCATION_FILEPATH}')
def update_resource_allocation(env_type: str) -> None:
@@ -151,10 +145,7 @@ def get_cpu_alloc(common_config: Dict) -> ResourceAlloc:
cpu_proportions = common_config['schain']['cpu']
schain_max_cpu_shares = int(cpu_proportions['skaled'] * MAX_CPU_SHARES)
ima_max_cpu_shares = int(cpu_proportions['ima'] * MAX_CPU_SHARES)
- return (
- ResourceAlloc(schain_max_cpu_shares),
- ResourceAlloc(ima_max_cpu_shares)
- )
+ return (ResourceAlloc(schain_max_cpu_shares), ResourceAlloc(ima_max_cpu_shares))
def verify_disk_size(
diff --git a/node_cli/main.py b/node_cli/main.py
index 6fefa7b9..d6331fd2 100644
--- a/node_cli/main.py
+++ b/node_cli/main.py
@@ -36,10 +36,10 @@
from node_cli.cli.wallet import wallet_cli
from node_cli.cli.ssl import ssl_cli
from node_cli.cli.exit import exit_cli
-from node_cli.cli.validate import validate_cli
from node_cli.cli.resources_allocation import resources_allocation_cli
from node_cli.cli.sync_node import sync_node_cli
+from node_cli.utils.exit_codes import CLIExitCodes
from node_cli.utils.helper import safe_load_texts, init_default_logger
from node_cli.configs import LONG_LINE
from node_cli.core.host import init_logs_dir
@@ -55,7 +55,7 @@ def cli():
pass
-@cli.command('version', help="Show SKALE node CLI version")
+@cli.command('version', help='Show SKALE node CLI version')
@click.option('--short', is_flag=True)
def version(short):
if short:
@@ -64,9 +64,10 @@ def version(short):
print(f'SKALE Node CLI version: {VERSION}')
-@cli.command('info', help="Show SKALE node CLI info")
+@cli.command('info', help='Show SKALE node CLI info')
def info():
- print(inspect.cleandoc(f'''
+ print(
+ inspect.cleandoc(f"""
{LONG_LINE}
Version: {__version__}
Full version: {VERSION}
@@ -75,7 +76,8 @@ def info():
Commit: {COMMIT}
Git branch: {BRANCH}
{LONG_LINE}
- '''))
+ """)
+ )
def get_sources_list() -> List[click.MultiCommand]:
@@ -93,8 +95,7 @@ def get_sources_list() -> List[click.MultiCommand]:
wallet_cli,
ssl_cli,
exit_cli,
- validate_cli,
- lvmpy_cli
+ lvmpy_cli,
]
@@ -102,8 +103,7 @@ def handle_exception(exc_type, exc_value, exc_traceback):
if issubclass(exc_type, KeyboardInterrupt):
sys.__excepthook__(exc_type, exc_value, exc_traceback)
return
- logger.error("Uncaught exception",
- exc_info=(exc_type, exc_value, exc_traceback))
+ logger.error('Uncaught exception', exc_info=(exc_type, exc_value, exc_traceback))
sys.excepthook = handle_exception
@@ -123,5 +123,5 @@ def handle_exception(exc_type, exc_value, exc_traceback):
except Exception as err:
traceback.print_exc()
logger.debug('Execution time: %d seconds', time.time() - start_time)
- error_exit(err)
+ error_exit(err, CLIExitCodes.FAILURE)
logger.debug('Execution time: %d seconds', time.time() - start_time)
diff --git a/node_cli/operations/base.py b/node_cli/operations/base.py
index 540f0188..f0eaec08 100644
--- a/node_cli/operations/base.py
+++ b/node_cli/operations/base.py
@@ -32,17 +32,11 @@
from node_cli.core.node_options import NodeOptions
from node_cli.core.resources import update_resource_allocation, init_shared_space_volume
-from node_cli.operations.common import (
- backup_old_contracts,
- download_contracts,
- configure_filebeat,
- configure_flask,
- unpack_backup_archive
-)
+from node_cli.operations.common import configure_filebeat, configure_flask, unpack_backup_archive
from node_cli.operations.volume import (
cleanup_volume_artifacts,
ensure_filestorage_mapping,
- prepare_block_device
+ prepare_block_device,
)
from node_cli.operations.docker_lvmpy import lvmpy_install # noqa
from node_cli.operations.skale_node import download_skale_node, sync_skale_node, update_images
@@ -55,7 +49,7 @@
remove_dynamic_containers,
remove_schain_container,
start_admin,
- stop_admin
+ stop_admin,
)
from node_cli.utils.meta import get_meta_info, update_meta
from node_cli.utils.print_formatters import print_failed_requirements_checks
@@ -68,15 +62,12 @@
def checked_host(func):
@functools.wraps(func)
def wrapper(env_filepath: str, env: Dict, *args, **kwargs):
- download_skale_node(
- env['CONTAINER_CONFIGS_STREAM'],
- env.get('CONTAINER_CONFIGS_DIR')
- )
+ download_skale_node(env.get('CONTAINER_CONFIGS_STREAM'), env.get('CONTAINER_CONFIGS_DIR'))
failed_checks = run_host_checks(
env['DISK_MOUNTPOINT'],
env['ENV_TYPE'],
CONTAINER_CONFIG_TMP_PATH,
- check_type=CheckType.PREINSTALL
+ check_type=CheckType.PREINSTALL,
)
if failed_checks:
print_failed_requirements_checks(failed_checks)
@@ -90,7 +81,7 @@ def wrapper(env_filepath: str, env: Dict, *args, **kwargs):
env['DISK_MOUNTPOINT'],
env['ENV_TYPE'],
CONTAINER_CONFIG_PATH,
- check_type=CheckType.POSTINSTALL
+ check_type=CheckType.POSTINSTALL,
)
if failed_checks:
print_failed_requirements_checks(failed_checks)
@@ -114,17 +105,10 @@ def update(env_filepath: str, env: Dict) -> None:
enable_monitoring = str_to_bool(env.get('MONITORING_CONTAINERS', 'False'))
configure_nftables(enable_monitoring=enable_monitoring)
- backup_old_contracts()
- download_contracts(env)
-
lvmpy_install(env)
generate_nginx_config()
- prepare_host(
- env_filepath,
- env['ENV_TYPE'],
- allocation=True
- )
+ prepare_host(env_filepath, env['ENV_TYPE'], allocation=True)
init_shared_space_volume(env['ENV_TYPE'])
current_stream = get_meta_info().config_stream
@@ -133,7 +117,7 @@ def update(env_filepath: str, env: Dict) -> None:
logger.info(
'Stream version was changed from %s to %s',
current_stream,
- env['CONTAINER_CONFIGS_STREAM']
+ env['CONTAINER_CONFIGS_STREAM'],
)
docker_cleanup()
@@ -142,7 +126,7 @@ def update(env_filepath: str, env: Dict) -> None:
env['CONTAINER_CONFIGS_STREAM'],
env['DOCKER_LVMPY_STREAM'],
distro.id(),
- distro.version()
+ distro.version(),
)
update_images(env=env)
compose_up(env)
@@ -160,12 +144,8 @@ def init(env_filepath: str, env: dict) -> bool:
enable_monitoring = str_to_bool(env.get('MONITORING_CONTAINERS', 'False'))
configure_nftables(enable_monitoring=enable_monitoring)
- prepare_host(
- env_filepath,
- env_type=env['ENV_TYPE']
- )
+ prepare_host(env_filepath, env_type=env['ENV_TYPE'])
link_env_file()
- download_contracts(env)
configure_filebeat()
configure_flask()
@@ -179,7 +159,7 @@ def init(env_filepath: str, env: dict) -> bool:
env['CONTAINER_CONFIGS_STREAM'],
env['DOCKER_LVMPY_STREAM'],
distro.id(),
- distro.version()
+ distro.version(),
)
update_resource_allocation(env_type=env['ENV_TYPE'])
update_images(env=env)
@@ -189,17 +169,10 @@ def init(env_filepath: str, env: dict) -> bool:
def init_sync(
- env_filepath: str,
- env: dict,
- archive: bool,
- historic_state: bool,
- snapshot_from: Optional[str]
+ env_filepath: str, env: dict, archive: bool, historic_state: bool, snapshot_from: Optional[str]
) -> bool:
cleanup_volume_artifacts(env['DISK_MOUNTPOINT'])
- download_skale_node(
- env.get('CONTAINER_CONFIGS_STREAM'),
- env.get('CONTAINER_CONFIGS_DIR')
- )
+ download_skale_node(env.get('CONTAINER_CONFIGS_STREAM'), env.get('CONTAINER_CONFIGS_DIR'))
sync_skale_node()
if env.get('SKIP_DOCKER_CONFIG') != 'True':
@@ -220,20 +193,16 @@ def init_sync(
ensure_filestorage_mapping()
link_env_file()
- download_contracts(env)
generate_nginx_config()
- prepare_block_device(
- env['DISK_MOUNTPOINT'],
- force=env['ENFORCE_BTRFS'] == 'True'
- )
+ prepare_block_device(env['DISK_MOUNTPOINT'], force=env['ENFORCE_BTRFS'] == 'True')
update_meta(
VERSION,
env['CONTAINER_CONFIGS_STREAM'],
env['DOCKER_LVMPY_STREAM'],
distro.id(),
- distro.version()
+ distro.version(),
)
update_resource_allocation(env_type=env['ENV_TYPE'])
@@ -251,10 +220,7 @@ def update_sync(env_filepath: str, env: Dict) -> bool:
compose_rm(env, sync_node=True)
remove_dynamic_containers()
cleanup_volume_artifacts(env['DISK_MOUNTPOINT'])
- download_skale_node(
- env['CONTAINER_CONFIGS_STREAM'],
- env.get('CONTAINER_CONFIGS_DIR')
- )
+ download_skale_node(env['CONTAINER_CONFIGS_STREAM'], env.get('CONTAINER_CONFIGS_DIR'))
sync_skale_node()
if env.get('SKIP_DOCKER_CONFIG') != 'True':
@@ -264,27 +230,18 @@ def update_sync(env_filepath: str, env: Dict) -> bool:
configure_nftables(enable_monitoring=enable_monitoring)
ensure_filestorage_mapping()
- backup_old_contracts()
- download_contracts(env)
- prepare_block_device(
- env['DISK_MOUNTPOINT'],
- force=env['ENFORCE_BTRFS'] == 'True'
- )
+ prepare_block_device(env['DISK_MOUNTPOINT'], force=env['ENFORCE_BTRFS'] == 'True')
generate_nginx_config()
- prepare_host(
- env_filepath,
- env['ENV_TYPE'],
- allocation=True
- )
+ prepare_host(env_filepath, env['ENV_TYPE'], allocation=True)
update_meta(
VERSION,
env['CONTAINER_CONFIGS_STREAM'],
env['DOCKER_LVMPY_STREAM'],
distro.id(),
- distro.version()
+ distro.version(),
)
update_images(env=env, sync_node=True)
@@ -306,7 +263,7 @@ def turn_on(env: dict) -> None:
env['CONTAINER_CONFIGS_STREAM'],
env['DOCKER_LVMPY_STREAM'],
distro.id(),
- distro.version()
+ distro.version(),
)
if env.get('SKIP_DOCKER_CONFIG') != 'True':
configure_docker()
@@ -324,7 +281,7 @@ def restore(env, backup_path, config_only=False):
env['DISK_MOUNTPOINT'],
env['ENV_TYPE'],
CONTAINER_CONFIG_PATH,
- check_type=CheckType.PREINSTALL
+ check_type=CheckType.PREINSTALL,
)
if failed_checks:
print_failed_requirements_checks(failed_checks)
@@ -347,7 +304,7 @@ def restore(env, backup_path, config_only=False):
env['CONTAINER_CONFIGS_STREAM'],
env['DOCKER_LVMPY_STREAM'],
distro.id(),
- distro.version()
+ distro.version(),
)
update_resource_allocation(env_type=env['ENV_TYPE'])
@@ -358,7 +315,7 @@ def restore(env, backup_path, config_only=False):
env['DISK_MOUNTPOINT'],
env['ENV_TYPE'],
CONTAINER_CONFIG_PATH,
- check_type=CheckType.POSTINSTALL
+ check_type=CheckType.POSTINSTALL,
)
if failed_checks:
print_failed_requirements_checks(failed_checks)
@@ -367,10 +324,7 @@ def restore(env, backup_path, config_only=False):
def repair_sync(
- schain_name: str,
- archive: bool,
- historic_state: bool,
- snapshot_from: Optional[str]
+ schain_name: str, archive: bool, historic_state: bool, snapshot_from: Optional[str]
) -> None:
stop_admin(sync_node=True)
remove_schain_container(schain_name=schain_name)
diff --git a/node_cli/operations/common.py b/node_cli/operations/common.py
index cfe79b42..7e484cdf 100644
--- a/node_cli/operations/common.py
+++ b/node_cli/operations/common.py
@@ -24,44 +24,23 @@
import shutil
import secrets
-import urllib.request
from shutil import copyfile
-from distutils.dir_util import copy_tree
from node_cli.configs import (
- CONTRACTS_PATH,
- BACKUP_CONTRACTS_PATH,
G_CONF_HOME,
FILEBEAT_CONFIG_PATH,
FLASK_SECRET_KEY_FILE,
- IMA_CONTRACTS_FILEPATH,
- MANAGER_CONTRACTS_FILEPATH,
- SRC_FILEBEAT_CONFIG_PATH
+ SRC_FILEBEAT_CONFIG_PATH,
)
logger = logging.getLogger(__name__)
-def backup_old_contracts():
- logging.info('Copying old contracts ABIs')
- copy_tree(CONTRACTS_PATH, BACKUP_CONTRACTS_PATH)
-
-
-def download_contracts(env):
- urllib.request.urlretrieve(env['MANAGER_CONTRACTS_ABI_URL'], MANAGER_CONTRACTS_FILEPATH)
- urllib.request.urlretrieve(env['IMA_CONTRACTS_ABI_URL'], IMA_CONTRACTS_FILEPATH)
-
-
def configure_filebeat():
logger.info('Configuring filebeat...')
copyfile(SRC_FILEBEAT_CONFIG_PATH, FILEBEAT_CONFIG_PATH)
shutil.chown(FILEBEAT_CONFIG_PATH, user='root')
- os.chmod(
- FILEBEAT_CONFIG_PATH,
- stat.S_IREAD |
- stat.S_IWRITE |
- stat.S_IEXEC
- )
+ os.chmod(FILEBEAT_CONFIG_PATH, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
logger.info('Filebeat configured')
diff --git a/node_cli/operations/docker_lvmpy.py b/node_cli/operations/docker_lvmpy.py
index 1810516b..6e28d58c 100644
--- a/node_cli/operations/docker_lvmpy.py
+++ b/node_cli/operations/docker_lvmpy.py
@@ -33,7 +33,7 @@
LVMPY_CRON_LOG_PATH,
LVMPY_CRON_SCHEDULE_MINUTES,
SCHAINS_MNT_DIR_REGULAR,
- VOLUME_GROUP
+ VOLUME_GROUP,
)
from lvmpy.src.install import setup as setup_lvmpy
@@ -57,20 +57,14 @@ def ensure_filestorage_mapping(mapping_dir=FILESTORAGE_MAPPING):
def sync_docker_lvmpy_repo(env):
if os.path.isdir(DOCKER_LVMPY_PATH):
shutil.rmtree(DOCKER_LVMPY_PATH)
- sync_repo(
- DOCKER_LVMPY_REPO_URL,
- DOCKER_LVMPY_PATH,
- env["DOCKER_LVMPY_STREAM"]
- )
+ sync_repo(DOCKER_LVMPY_REPO_URL, DOCKER_LVMPY_PATH, env['DOCKER_LVMPY_STREAM'])
def lvmpy_install(env):
ensure_filestorage_mapping()
logging.info('Configuring and starting lvmpy')
setup_lvmpy(
- block_device=env['DISK_MOUNTPOINT'],
- volume_group=VOLUME_GROUP,
- exec_start=LVMPY_RUN_CMD
+ block_device=env['DISK_MOUNTPOINT'], volume_group=VOLUME_GROUP, exec_start=LVMPY_RUN_CMD
)
init_healing_cron()
logger.info('docker-lvmpy is configured and started')
@@ -86,7 +80,5 @@ def init_healing_cron():
if legacy_line in jobs:
c.remove_all(command=legacy_line)
if cron_line not in jobs:
- job = c.new(
- command=cron_line
- )
+ job = c.new(command=cron_line)
job.minute.every(LVMPY_CRON_SCHEDULE_MINUTES)
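
The healing-cron block above relies on python-crontab's fluent API. A minimal standalone sketch of the same idempotent-job pattern — the command string and interval are illustrative placeholders, not node-cli's actual values:

```python
# Sketch of the python-crontab pattern used by init_healing_cron.
# Writing the root crontab requires appropriate privileges.
from crontab import CronTab

def ensure_cron_job(command: str, minutes: int) -> None:
    cron = CronTab(user='root')
    existing = [job.command for job in cron]
    if command not in existing:
        job = cron.new(command=command)
        job.minute.every(minutes)
        cron.write()

# Hypothetical command and schedule, for illustration only:
ensure_cron_job('/usr/local/bin/lvmpy-heal >> /var/log/lvmpy-cron.log 2>&1', 5)
```
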
diff --git a/node_cli/operations/skale_node.py b/node_cli/operations/skale_node.py
index d91e4765..fd3b6f8a 100644
--- a/node_cli/operations/skale_node.py
+++ b/node_cli/operations/skale_node.py
@@ -22,14 +22,11 @@
import shutil
from typing import Optional
-from node_cli.utils.helper import rm_dir, rsync_dirs, safe_mkdir
+from node_cli.utils.exit_codes import CLIExitCodes
+from node_cli.utils.helper import rm_dir, rsync_dirs, safe_mkdir, error_exit
from node_cli.utils.git_utils import clone_repo
from node_cli.utils.docker_utils import compose_pull, compose_build
-from node_cli.configs import (
- CONTAINER_CONFIG_PATH,
- CONTAINER_CONFIG_TMP_PATH,
- SKALE_NODE_REPO_URL
-)
+from node_cli.configs import CONTAINER_CONFIG_PATH, CONTAINER_CONFIG_TMP_PATH, SKALE_NODE_REPO_URL
logger = logging.getLogger(__name__)
@@ -43,17 +40,35 @@ def update_images(env: dict, sync_node: bool = False) -> None:
compose_pull(env=env, sync_node=sync_node)
-def download_skale_node(stream: Optional[str], src: Optional[str]) -> None:
- rm_dir(CONTAINER_CONFIG_TMP_PATH)
- safe_mkdir(CONTAINER_CONFIG_TMP_PATH)
- dest = CONTAINER_CONFIG_TMP_PATH
- if src:
- rsync_dirs(src, dest)
- else:
- clone_repo(
- SKALE_NODE_REPO_URL,
- CONTAINER_CONFIG_TMP_PATH,
- stream
+def download_skale_node(stream: Optional[str] = None, src: Optional[str] = None) -> None:
+ """Downloads SKALE node config from repo or local directory"""
+ if not src and not stream:
+ error_exit('Either src path or stream must be provided', exit_code=CLIExitCodes.FAILURE)
+
+ try:
+ rm_dir(CONTAINER_CONFIG_TMP_PATH)
+ safe_mkdir(CONTAINER_CONFIG_TMP_PATH)
+ dest = CONTAINER_CONFIG_TMP_PATH
+
+ if src:
+ if not os.path.isdir(src):
+ error_exit(
+ f'Source directory does not exist: {src}', exit_code=CLIExitCodes.FAILURE
+ )
+ logger.info(f'Syncing config files from {src}')
+ rsync_dirs(src, dest)
+ elif stream:
+ logger.info(f'Cloning config files from {SKALE_NODE_REPO_URL} ({stream})')
+ clone_repo(SKALE_NODE_REPO_URL, dest, stream)
+ else:
+ # Should never reach this point
+ error_exit('Either src path or stream must be provided', exit_code=CLIExitCodes.FAILURE)
+
+ except (OSError, RuntimeError) as err:
+ rm_dir(CONTAINER_CONFIG_TMP_PATH)
+ error_exit(
+ f'Failed to download node configuration: {err}',
+ exit_code=CLIExitCodes.OPERATION_EXECUTION_ERROR,
)
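
For context on the hardened `download_skale_node` contract, a sketch of the call shapes it now supports (paths and stream names are placeholders):

```python
# Illustrative calls against the new download_skale_node signature.
from node_cli.operations.skale_node import download_skale_node

# Clone the configured skale-node repo at a given stream (branch/tag):
download_skale_node(stream='3.0.2')

# Sync from a local checkout instead of cloning:
download_skale_node(src='/path/to/local/skale-node')

# With neither argument, the function now exits via error_exit() with
# CLIExitCodes.FAILURE instead of failing later inside clone_repo().
```
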
diff --git a/node_cli/utils/decorators.py b/node_cli/utils/decorators.py
index 95c822e8..f0d11e00 100644
--- a/node_cli/utils/decorators.py
+++ b/node_cli/utils/decorators.py
@@ -30,11 +30,14 @@
def check_not_inited(f):
+ """Decorator that checks if node is not already initialized."""
+
@wraps(f)
def inner(*args, **kwargs):
if is_node_inited():
error_exit(TEXTS['node']['already_inited'], exit_code=CLIExitCodes.NODE_STATE_ERROR)
return f(*args, **kwargs)
+
return inner
@@ -44,6 +47,7 @@ def inner(*args, **kwargs):
if not is_node_inited():
error_exit(TEXTS['node']['not_inited'], exit_code=CLIExitCodes.NODE_STATE_ERROR)
return f(*args, **kwargs)
+
return inner
@@ -53,8 +57,9 @@ def inner(*args, **kwargs):
if not is_user_valid():
g_conf_user = get_g_conf_user()
current_user = get_system_user()
- error_msg = f'You couldn\'t execute this command from user {current_user}. \
-Allowed: {g_conf_user} or root.'
+ error_msg = f"You couldn't execute this command from user {current_user}. \
+Allowed: {g_conf_user} or root."
error_exit(error_msg, exit_code=CLIExitCodes.BAD_USER_ERROR)
return f(*args, **kwargs)
+
return inner
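
These decorators are meant to wrap click commands; a minimal usage sketch (the command itself is hypothetical, not part of node-cli):

```python
# Hypothetical click command guarded by the state-checking decorator.
import click

from node_cli.utils.decorators import check_not_inited

@click.command()
@check_not_inited
def init_node():
    """Body runs only when the node has not been initialized yet."""
    click.echo('Initializing node...')
```
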
diff --git a/node_cli/utils/git_utils.py b/node_cli/utils/git_utils.py
index be1fd49d..4adfebab 100644
--- a/node_cli/utils/git_utils.py
+++ b/node_cli/utils/git_utils.py
@@ -21,29 +21,65 @@
import logging
from git.repo.base import Repo
-from git.exc import GitCommandError
-
+from git.exc import GitCommandError, GitError
+from node_cli.utils.exit_codes import CLIExitCodes
+from node_cli.utils.helper import error_exit
logger = logging.getLogger(__name__)
def check_is_branch(repo: Repo, ref_name: str) -> bool:
+ """Check if the given reference name is a valid git branch."""
+ if not repo or not isinstance(repo, Repo):
+ raise ValueError('Invalid repository object')
+ if not ref_name or not isinstance(ref_name, str):
+ raise ValueError('Invalid reference name')
+
try:
+        # Check whether ref_name exists as a branch using git show-ref
repo.git.show_ref('--verify', f'refs/heads/{ref_name}')
- logger.debug(f'{ref_name} is branch')
+ logger.debug(f'{ref_name} is a branch')
return True
except GitCommandError:
- logger.debug(f'{ref_name} is not branch')
+ # Expected error when reference is not found
+ logger.debug(f'{ref_name} is not a branch')
return False
+ except GitError as e:
+        # Git-specific errors (permissions, config, etc.)
+ logger.error(f'Git error checking branch: {str(e)}')
+ raise RuntimeError(f'Git error checking branch: {str(e)}') from e
+ except Exception as e:
+ # Unexpected system errors
+ logger.error(f'Unexpected error checking branch: {str(e)}')
+ raise RuntimeError(f'Unexpected error checking branch: {str(e)}') from e
def clone_repo(repo_url: str, repo_path: str, ref_name: str) -> None:
- logger.info(f'Cloning {repo_url} → {repo_path}')
- Repo.clone_from(repo_url, repo_path)
- fetch_pull_repo(repo_path, ref_name)
+ """Clone a git repository and checkout specified reference."""
+ if not all([repo_url, repo_path, ref_name]):
+ error_exit('Empty repository URL, path or reference', CLIExitCodes.FAILURE)
+ if not all(isinstance(x, str) for x in [repo_url, repo_path, ref_name]):
+ error_exit('Invalid input types', CLIExitCodes.FAILURE)
+
+ try:
+ logger.info(f'Cloning {repo_url} → {repo_path}')
+ Repo.clone_from(repo_url, repo_path)
+ fetch_pull_repo(repo_path, ref_name)
+ except GitError as e:
+ error_exit(
+ f'Git error cloning repository: {str(e)}', CLIExitCodes.OPERATION_EXECUTION_ERROR
+ )
+ except Exception as e:
+ error_exit(f'Unexpected error cloning repository: {str(e)}', CLIExitCodes.FAILURE)
def sync_repo(repo_url: str, repo_path: str, ref_name: str) -> None:
+ """Sync Git repository by cloning if not exists or fetching latest changes."""
+ if not all([repo_url, repo_path, ref_name]):
+ error_exit('Empty repository URL, path or reference', CLIExitCodes.FAILURE)
+ if not all(isinstance(x, str) for x in [repo_url, repo_path, ref_name]):
+ error_exit('Invalid input types', CLIExitCodes.FAILURE)
+
logger.info(f'Sync repo {repo_url} → {repo_path}')
if not os.path.isdir(os.path.join(repo_path, '.git')):
clone_repo(repo_url, repo_path, ref_name)
@@ -52,11 +88,32 @@ def sync_repo(repo_url: str, repo_path: str, ref_name: str) -> None:
def fetch_pull_repo(repo_path: str, ref_name: str) -> None:
- repo = Repo(repo_path)
- repo_name = os.path.basename(repo.working_dir)
- logger.info(f'Fetching {repo_name} changes')
- repo.remotes.origin.fetch()
- logger.info(f'Checkouting {repo_path} to {ref_name}')
- repo.git.checkout(ref_name)
- if check_is_branch(repo, ref_name):
- repo.remotes.origin.pull()
+ """Fetch latest changes and checkout/pull specific git reference."""
+ # Validate inputs
+ if not repo_path or not isinstance(repo_path, str):
+ error_exit('Invalid repository path', CLIExitCodes.FAILURE)
+ if not ref_name or not isinstance(ref_name, str):
+ error_exit('Invalid reference name', CLIExitCodes.FAILURE)
+
+ try:
+ # Initialize repo and get name for logging
+ repo = Repo(repo_path)
+ repo_name = os.path.basename(repo.working_dir)
+
+ # Fetch latest changes
+ logger.info(f'Fetching latest changes for {repo_name}')
+ repo.remotes.origin.fetch()
+
+ # Checkout specified reference
+ logger.info(f'Checking out {ref_name} in {repo_name}')
+ repo.git.checkout(ref_name)
+
+ # Pull latest changes if ref is a branch
+ if check_is_branch(repo, ref_name):
+ logger.info(f'Pulling latest changes for branch {ref_name}')
+ repo.remotes.origin.pull()
+
+ except GitError as e:
+ error_exit(f'Git operation failed: {str(e)}', CLIExitCodes.OPERATION_EXECUTION_ERROR)
+ except Exception as e:
+ error_exit(f'Repository operation failed: {str(e)}', CLIExitCodes.FAILURE)
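
A short sketch of how the hardened helpers compose (URL, path, and ref below are placeholders):

```python
# Illustrative use of sync_repo: clones on the first run, then fetches,
# checks out the ref, and pulls if the ref is a branch. Invalid inputs or
# git failures now terminate via error_exit() with an appropriate
# CLIExitCodes value instead of raising.
from node_cli.utils.git_utils import sync_repo

sync_repo(
    repo_url='https://github.com/skalenetwork/skale-node.git',  # placeholder
    repo_path='/tmp/skale-node',
    ref_name='develop',
)
```
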
diff --git a/node_cli/utils/helper.py b/node_cli/utils/helper.py
index 39de440a..dbff2315 100644
--- a/node_cli/utils/helper.py
+++ b/node_cli/utils/helper.py
@@ -25,7 +25,7 @@
import sys
import uuid
from urllib.parse import urlparse
-from typing import Optional
+from typing import Any, Optional
import yaml
import shutil
@@ -49,20 +49,26 @@
from node_cli.utils.print_formatters import print_err_response
from node_cli.utils.exit_codes import CLIExitCodes
-from node_cli.configs.env import (
- absent_params as absent_env_params,
- get_env_config
-)
from node_cli.configs import (
- TEXT_FILE, ADMIN_HOST, ADMIN_PORT, HIDE_STREAM_LOG, GLOBAL_SKALE_DIR,
- GLOBAL_SKALE_CONF_FILEPATH, DEFAULT_SSH_PORT
+ TEXT_FILE,
+ ADMIN_HOST,
+ ADMIN_PORT,
+ HIDE_STREAM_LOG,
+ GLOBAL_SKALE_DIR,
+ GLOBAL_SKALE_CONF_FILEPATH,
+ DEFAULT_SSH_PORT,
)
from node_cli.configs.routes import get_route
from node_cli.utils.global_config import read_g_config, get_system_user
from node_cli.configs.cli_logger import (
- FILE_LOG_FORMAT, LOG_BACKUP_COUNT, LOG_FILE_SIZE_BYTES,
- LOG_FILEPATH, STREAM_LOG_FORMAT, DEBUG_LOG_FILEPATH)
+ FILE_LOG_FORMAT,
+ LOG_BACKUP_COUNT,
+ LOG_FILE_SIZE_BYTES,
+ LOG_FILEPATH,
+ STREAM_LOG_FORMAT,
+ DEBUG_LOG_FILEPATH,
+)
logger = logging.getLogger(__name__)
@@ -71,7 +77,7 @@
DEFAULT_ERROR_DATA = {
'status': 'error',
- 'payload': 'Request failed. Check skale_api container logs'
+ 'payload': 'Request failed. Check skale_api container logs',
}
@@ -100,14 +106,7 @@ def init_file(path, content=None):
write_json(path, content)
-def run_cmd(
- cmd,
- env={},
- shell=False,
- secure=False,
- check_code=True,
- separate_stderr=False
-):
+def run_cmd(cmd, env={}, shell=False, secure=False, check_code=True, separate_stderr=False):
if not secure:
logger.debug(f'Running: {cmd}')
else:
@@ -115,13 +114,7 @@ def run_cmd(
stdout, stderr = subprocess.PIPE, subprocess.PIPE
if not separate_stderr:
stderr = subprocess.STDOUT
- res = subprocess.run(
- cmd,
- shell=shell,
- stdout=stdout,
- stderr=stderr,
- env={**env, **os.environ}
- )
+ res = subprocess.run(cmd, shell=shell, stdout=stdout, stderr=stderr, env={**env, **os.environ})
if check_code:
output = res.stdout.decode('utf-8')
if res.returncode:
@@ -152,7 +145,7 @@ def process_template(source, destination, data):
"""
template = read_file(source)
processed_template = Environment().from_string(template).render(data)
- with open(destination, "w") as f:
+ with open(destination, 'w') as f:
f.write(processed_template)
@@ -160,26 +153,28 @@ def get_username():
return os.environ.get('USERNAME') or os.environ.get('USER')
-def extract_env_params(env_filepath, sync_node=False, raise_for_status=True):
- env_params = get_env_config(env_filepath, sync_node=sync_node)
- absent_params = ', '.join(absent_env_params(env_params))
- if absent_params:
- click.echo(f"Your env file({env_filepath}) have some absent params: "
- f"{absent_params}.\n"
- f"You should specify them to make sure that "
- f"all services are working",
- err=True)
- if raise_for_status:
- raise InvalidEnvFileError(f'Missing params: {absent_params}')
- return None
- return env_params
-
-
def str_to_bool(val):
return bool(distutils.util.strtobool(val))
-def error_exit(error_payload, exit_code=CLIExitCodes.FAILURE):
+def error_exit(error_payload: Any, exit_code: CLIExitCodes = CLIExitCodes.FAILURE) -> None:
+ """Print error message and exit the program with specified exit code.
+
+ Args:
+ error_payload: Error message string or list of error messages
+ exit_code: Exit code to use when terminating the program (default: FAILURE)
+
+ Raises:
+ TypeError: If exit_code is not CLIExitCodes
+
+ Example:
+ >>> error_exit("Permission denied", CLIExitCodes.BAD_USER_ERROR)
+ Permission denied
+
+ """
+ if not isinstance(exit_code, CLIExitCodes):
+ raise TypeError('exit_code must be CLIExitCodes enum')
+
print_err_response(error_payload)
sys.exit(exit_code.value)
@@ -260,7 +255,7 @@ def download_dump(path, container_name=None):
error_exit(r.json())
return None
d = r.headers['Content-Disposition']
- fname_q = re.findall("filename=(.+)", d)[0]
+ fname_q = re.findall('filename=(.+)', d)[0]
fname = fname_q.replace('"', '')
filepath = os.path.join(path, fname)
with open(filepath, 'wb') as f:
@@ -271,8 +266,7 @@ def download_dump(path, container_name=None):
def init_default_logger():
f_handler = get_file_handler(LOG_FILEPATH, logging.INFO)
debug_f_handler = get_file_handler(DEBUG_LOG_FILEPATH, logging.DEBUG)
- logging.basicConfig(
- level=logging.DEBUG, handlers=[f_handler, debug_f_handler])
+ logging.basicConfig(level=logging.DEBUG, handlers=[f_handler, debug_f_handler])
def get_stream_handler():
@@ -286,8 +280,8 @@ def get_stream_handler():
def get_file_handler(log_filepath, log_level):
formatter = Formatter(FILE_LOG_FORMAT)
f_handler = py_handlers.RotatingFileHandler(
- log_filepath, maxBytes=LOG_FILE_SIZE_BYTES,
- backupCount=LOG_BACKUP_COUNT)
+ log_filepath, maxBytes=LOG_FILE_SIZE_BYTES, backupCount=LOG_BACKUP_COUNT
+ )
f_handler.setFormatter(formatter)
f_handler.setLevel(log_level)
@@ -304,27 +298,15 @@ def to_camel_case(snake_str):
return components[0] + ''.join(x.title() for x in components[1:])
-def validate_abi(abi_filepath: str) -> dict:
- if not os.path.isfile(abi_filepath):
- return {'filepath': abi_filepath,
- 'status': 'error',
- 'msg': 'No such file'}
- try:
- with open(abi_filepath) as abi_file:
- json.load(abi_file)
- except Exception:
- return {'filepath': abi_filepath, 'status': 'error',
- 'msg': 'Failed to load abi file as json'}
- return {'filepath': abi_filepath, 'status': 'ok', 'msg': ''}
-
-
def streamed_cmd(func):
- """ Decorator that allow function to print logs into stderr """
+ """Decorator that allow function to print logs into stderr"""
+
@wraps(func)
def wrapper(*args, **kwargs):
if HIDE_STREAM_LOG is None:
logging.getLogger('').addHandler(get_stream_handler())
return func(*args, **kwargs)
+
return wrapper
@@ -349,27 +331,70 @@ def get_g_conf_home():
def rm_dir(folder: str) -> None:
- if os.path.exists(folder):
- logger.info(f'{folder} exists, removing...')
- shutil.rmtree(folder)
- else:
- logger.info(f'{folder} doesn\'t exist, skipping...')
+ """Safely remove a directory and all its contents, if it exists."""
+ if not isinstance(folder, str):
+ error_exit(f'folder must be a string, got {type(folder)}', exit_code=CLIExitCodes.FAILURE)
+
+ try:
+ if os.path.exists(folder):
+ logger.info(f'Directory {folder} exists, removing...')
+ shutil.rmtree(folder, ignore_errors=False)
+ else:
+ logger.info(f"Directory {folder} doesn't exist, skipping...")
+ except OSError as e:
+ logger.error(f'Failed to remove directory {folder}: {e}')
+ error_exit(
+ f'Failed to remove directory {folder}: {e}',
+ exit_code=CLIExitCodes.OPERATION_EXECUTION_ERROR,
+ )
+
-def safe_mkdir(path: str, print_res: bool = False):
+def safe_mkdir(path: str, print_res: bool = False) -> None:
+    """Create a directory if it doesn't exist."""
+    if not isinstance(path, str):
+        error_exit(f'path must be a string, got {type(path)}', exit_code=CLIExitCodes.FAILURE)
if os.path.exists(path):
+ logger.debug(f'Directory {path} already exists')
return
+
msg = f'Creating {path} directory...'
logger.info(msg)
if print_res:
print(msg)
- os.makedirs(path, exist_ok=True)
+
+ try:
+ os.makedirs(path, exist_ok=True)
+ except OSError as e:
+ logger.error(f'Failed to create directory {path}: {e}')
+ error_exit(
+ f'Failed to create directory {path}: {e}',
+ exit_code=CLIExitCodes.OPERATION_EXECUTION_ERROR,
+ )
def rsync_dirs(src: str, dest: str) -> None:
- logger.info(f'Syncing {dest} with {src}')
- run_cmd(['rsync', '-r', f'{src}/', dest])
- run_cmd(['rsync', '-r', f'{src}/.git', dest])
+ """Synchronize two directories using rsync."""
+ if not isinstance(src, str) or not isinstance(dest, str):
+ error_exit('Source and destination paths must be strings', exit_code=CLIExitCodes.FAILURE)
+
+ if not src.strip() or not dest.strip():
+ error_exit('Source and destination paths cannot be empty', exit_code=CLIExitCodes.FAILURE)
+
+ if not os.path.isdir(src):
+ error_exit(f'Source directory does not exist: {src}', exit_code=CLIExitCodes.FAILURE)
+
+ logger.info(f'Syncing directory {dest} with {src}')
+
+ try:
+        # Sync directory contents, then explicitly copy the .git directory
+ run_cmd(['rsync', '-r', f'{src}/', dest])
+ run_cmd(['rsync', '-r', f'{src}/.git', dest])
+ except subprocess.CalledProcessError as e:
+ logger.error(f'Rsync failed: {e}')
+ error_exit(
+ f'Failed to sync directories: {e}', exit_code=CLIExitCodes.OPERATION_EXECUTION_ERROR
+ )
def ok_result(payload: dict = None):
@@ -387,8 +412,7 @@ def convert(self, value, param, ctx):
try:
result = urlparse(value)
except ValueError:
- self.fail(f'Some characters are not allowed in {value}',
- param, ctx)
+ self.fail(f'Some characters are not allowed in {value}', param, ctx)
if not all([result.scheme, result.netloc]):
self.fail(f'Expected valid url. Got {value}', param, ctx)
return value
@@ -401,8 +425,7 @@ def convert(self, value, param, ctx):
try:
ipaddress.ip_address(value)
except ValueError:
- self.fail(f'expected valid ipv4/ipv6 address. Got {value}',
- param, ctx)
+ self.fail(f'expected valid ipv4/ipv6 address. Got {value}', param, ctx)
return value
@@ -422,3 +445,8 @@ def get_ssh_port(ssh_service_name='ssh'):
except OSError:
logger.exception('Cannot get ssh service port')
return DEFAULT_SSH_PORT
+
+
+# TODO: Add a more robust check for contract addresses and make sure it doesn't conflict with aliases.
+def is_contract_address(value: str) -> bool:
+ return len(value) == 42 and value.startswith('0x')
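
A quick illustration of what this heuristic does and doesn't check:

```python
# is_contract_address only checks length and the '0x' prefix; it does not
# validate hex characters or checksums (hence the TODO above).
from node_cli.utils.helper import is_contract_address

assert is_contract_address('0x' + 'a' * 40)       # well-formed address
assert not is_contract_address('0x123')           # too short
assert is_contract_address('0x' + 'g' * 40)       # non-hex still passes
```
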
diff --git a/node_cli/utils/meta.py b/node_cli/utils/meta.py
index 69078af2..94e9581b 100644
--- a/node_cli/utils/meta.py
+++ b/node_cli/utils/meta.py
@@ -11,16 +11,18 @@
class CliMeta(
- namedtuple(
- 'Node',
- ['version', 'config_stream', 'docker_lvmpy_stream', 'os_id', 'os_version']
- )
+ namedtuple('Node', ['version', 'config_stream', 'docker_lvmpy_stream', 'os_id', 'os_version'])
):
__slots__ = ()
- def __new__(cls, version=DEFAULT_VERSION, config_stream=DEFAULT_CONFIG_STREAM,
- docker_lvmpy_stream=DEFAULT_DOCKER_LVMPY_STREAM, os_id=DEFAULT_OS_ID,
- os_version=DEFAULT_OS_VERSION):
+ def __new__(
+ cls,
+ version=DEFAULT_VERSION,
+ config_stream=DEFAULT_CONFIG_STREAM,
+ docker_lvmpy_stream=DEFAULT_DOCKER_LVMPY_STREAM,
+ os_id=DEFAULT_OS_ID,
+ os_version=DEFAULT_OS_VERSION,
+ ):
return super(CliMeta, cls).__new__(
cls, version, config_stream, docker_lvmpy_stream, os_id, os_version
)
@@ -42,10 +44,13 @@ def save_meta(meta: CliMeta) -> None:
def compose_default_meta() -> CliMeta:
- return CliMeta(version=DEFAULT_VERSION,
- docker_lvmpy_stream=DEFAULT_DOCKER_LVMPY_STREAM,
- config_stream=DEFAULT_CONFIG_STREAM, os_id=DEFAULT_OS_ID,
- os_version=DEFAULT_OS_VERSION)
+ return CliMeta(
+ version=DEFAULT_VERSION,
+ docker_lvmpy_stream=DEFAULT_DOCKER_LVMPY_STREAM,
+ config_stream=DEFAULT_CONFIG_STREAM,
+ os_id=DEFAULT_OS_ID,
+ os_version=DEFAULT_OS_VERSION,
+ )
def ensure_meta(meta: CliMeta = None) -> None:
@@ -54,8 +59,9 @@ def ensure_meta(meta: CliMeta = None) -> None:
save_meta(meta)
-def update_meta(version: str, config_stream: str,
- docker_lvmpy_stream: str, os_id: str, os_version: str) -> None:
+def update_meta(
+ version: str, config_stream: str, docker_lvmpy_stream: str, os_id: str, os_version: str
+) -> None:
ensure_meta()
meta = CliMeta(version, config_stream, docker_lvmpy_stream, os_id, os_version)
save_meta(meta)
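
For reference, the reflowed CliMeta keeps its default-filling behavior; a small sketch:

```python
# Any field omitted from CliMeta falls back to its DEFAULT_* constant.
from node_cli.utils.meta import CliMeta

meta = CliMeta(version='2.6.0', config_stream='3.0.2')
print(meta.version, meta.config_stream, meta.docker_lvmpy_stream, meta.os_id)
```
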
diff --git a/node_cli/utils/print_formatters.py b/node_cli/utils/print_formatters.py
index 72225db9..844cc949 100644
--- a/node_cli/utils/print_formatters.py
+++ b/node_cli/utils/print_formatters.py
@@ -20,6 +20,7 @@
import os
import json
import datetime
+from typing import Any
import texttable
from dateutil import parser
@@ -34,13 +35,15 @@
def print_wallet_info(wallet):
- print(inspect.cleandoc(f'''
+ print(
+ inspect.cleandoc(f"""
{LONG_LINE}
Address: {wallet['address'].lower()}
ETH balance: {wallet['eth_balance']} ETH
SKALE balance: {wallet['skale_balance']} SKALE
{LONG_LINE}
- '''))
+ """)
+ )
def get_tty_width():
@@ -63,31 +66,21 @@ def table(self, headers, rows):
def format_date(date):
- return date.strftime("%b %d %Y %H:%M:%S")
+ return date.strftime('%b %d %Y %H:%M:%S')
def print_containers(containers):
- headers = [
- 'Name',
- 'Status',
- 'Started At',
- 'Image'
- ]
+ headers = ['Name', 'Status', 'Started At', 'Image']
rows = []
for container in containers:
- date = parser.parse(container["state"]["StartedAt"])
- status = container["state"]["Status"].capitalize()
+ date = parser.parse(container['state']['StartedAt'])
+ status = container['state']['Status'].capitalize()
if not container['state']['Running']:
- finished_date = parser.parse(container["state"]["FinishedAt"])
+ finished_date = parser.parse(container['state']['FinishedAt'])
status = f'{status} ({format_date(finished_date)})'
- rows.append([
- container['name'],
- status,
- format_date(date),
- container['image']
- ])
+ rows.append([container['name'], status, format_date(date), container['image']])
print(Formatter().table(headers, rows))
@@ -106,38 +99,29 @@ def print_schains(schains):
rows = []
for schain in schains:
date = datetime.datetime.fromtimestamp(schain['start_date'])
- rows.append([
- schain['name'],
- schain['mainnet_owner'],
- schain['part_of_node'],
- schain['lifetime'],
- format_date(date),
- schain['deposit'],
- schain['generation'],
- schain['originator'],
- schain['options']['allocation_type']
- ])
+ rows.append(
+ [
+ schain['name'],
+ schain['mainnet_owner'],
+ schain['part_of_node'],
+ schain['lifetime'],
+ format_date(date),
+ schain['deposit'],
+ schain['generation'],
+ schain['originator'],
+ schain['options']['allocation_type'],
+ ]
+ )
print(Formatter().table(headers, rows))
def print_dkg_statuses(statuses):
- headers = [
- 'sChain Name',
- 'DKG Status',
- 'Added At',
- 'sChain Status'
- ]
+ headers = ['sChain Name', 'DKG Status', 'Added At', 'sChain Status']
rows = []
for status in statuses:
date = datetime.datetime.fromtimestamp(status['added_at'])
- schain_status = 'Deleted' \
- if status['is_deleted'] else 'Exists'
- rows.append([
- status['name'],
- status['dkg_status_name'],
- format_date(date),
- schain_status
- ])
+ schain_status = 'Deleted' if status['is_deleted'] else 'Exists'
+ rows.append([status['name'], status['dkg_status_name'], format_date(date), schain_status])
print(Formatter().table(headers, rows))
@@ -152,23 +136,25 @@ def print_schains_healthchecks(schains):
'IMA',
'Firewall',
'RPC',
- 'Blocks'
+ 'Blocks',
]
rows = []
for schain in schains:
healthchecks = schain['healthchecks']
- rows.append([
- schain['name'],
- healthchecks['config_dir'],
- healthchecks['dkg'],
- healthchecks['config'],
- healthchecks['volume'],
- healthchecks['skaled_container'],
- healthchecks.get('ima_container', 'No IMA'),
- healthchecks['firewall_rules'],
- healthchecks['rpc'],
- healthchecks['blocks']
- ])
+ rows.append(
+ [
+ schain['name'],
+ healthchecks['config_dir'],
+ healthchecks['dkg'],
+ healthchecks['config'],
+ healthchecks['volume'],
+ healthchecks['skaled_container'],
+ healthchecks.get('ima_container', 'No IMA'),
+ healthchecks['firewall_rules'],
+ healthchecks['rpc'],
+ healthchecks['blocks'],
+ ]
+ )
print(Formatter().table(headers, rows))
@@ -187,19 +173,11 @@ def print_schains_logs(schains_logs):
def print_log_list(logs):
- headers = [
- 'Name',
- 'Size',
- 'Created At'
- ]
+ headers = ['Name', 'Size', 'Created At']
rows = []
for log in logs:
date = datetime.datetime.fromtimestamp(log['created_at'])
- rows.append([
- log['name'],
- log['size'],
- format_date(date)
- ])
+ rows.append([log['name'], log['size'], format_date(date)])
print(Formatter().table(headers, rows))
@@ -209,10 +187,7 @@ def print_dict(title, rows, headers=['Key', 'Value']):
def print_exit_status(exit_status_info):
- headers = [
- 'Schain name',
- 'Status'
- ]
+ headers = ['Schain name', 'Status']
logs = exit_status_info['data']
node_exit_status = exit_status_info['status'].lower()
rows = [[log['name'], log['status'].lower()] for log in logs]
@@ -230,20 +205,14 @@ def print_firewall_rules(rules, raw=False):
print('No allowed endpoints')
return
if raw:
- print(json.dumpes(rules))
- headers = [
- 'IP range',
- 'Port'
- ]
+ print(json.dumps(rules))
+ headers = ['IP range', 'Port']
rows = []
for rule in sorted(rules, key=lambda r: r['port']):
ip_range = 'All IPs'
- if rule["first_ip"] and rule["last_ip"]:
+ if rule['first_ip'] and rule['last_ip']:
ip_range = f'{rule["first_ip"]} - {rule["last_ip"]}'
- rows.append([
- ip_range,
- rule['port']
- ])
+ rows.append([ip_range, rule['port']])
print(Formatter().table(headers, rows))
@@ -256,24 +225,13 @@ def print_schain_info(info: dict, raw: bool = False) -> None:
print(Formatter().table(headers, [rows]))
-def print_abi_validation_errors(info: list, raw: bool = False) -> None:
- if not info:
- return
- if raw:
- print(json.dumps(info))
- else:
- headers = info[0].keys()
- rows = [tuple(r.values()) for r in info]
- headers = list(map(lambda h: h.capitalize(), headers))
- print(Formatter().table(headers, rows))
-
-
def print_node_cmd_error():
print(TEXTS['node']['cmd_failed'].format(DEBUG_LOG_FILEPATH))
def print_node_info(node, node_status):
- print(inspect.cleandoc(f"""
+ print(
+ inspect.cleandoc(f"""
{LONG_LINE}
Node info
Name: {node['name']}
@@ -284,20 +242,41 @@ def print_node_info(node, node_status):
Domain name: {node['domain_name']}
Status: {node_status}
{LONG_LINE}
- """))
-
-
-def print_err_response(error_payload):
- if isinstance(error_payload, list):
- error_msg = '\n'.join(error_payload)
- else:
- error_msg = error_payload
-
- print('Command failed with following errors:')
- print(LONG_LINE)
- print(error_msg)
- print(LONG_LINE)
- print(f'You can find more info in {DEBUG_LOG_FILEPATH}')
+ """)
+ )
+
+
+def print_err_response(error_payload: Any) -> None:
+ """Print formatted error message from API response payload.
+
+ Handles different types of error payloads (str, list, dict etc.) and formats them
+ into a user-friendly error message along with debug log file location.
+ """
+ try:
+ if isinstance(error_payload, (list, tuple)):
+ # Join list items with newlines for multiple errors
+ error_msg = '\n'.join(str(err) for err in error_payload)
+ elif isinstance(error_payload, dict):
+ # Format dict as JSON string
+ error_msg = json.dumps(error_payload, indent=2)
+ else:
+ # Convert any other type to string
+ error_msg = str(error_payload)
+
+ print('Command failed with following errors:')
+ print(LONG_LINE)
+ print(error_msg)
+ print(LONG_LINE)
+ print(f'You can find more info in {DEBUG_LOG_FILEPATH}')
+
+ except Exception as e:
+ # Fallback for unexpected errors while formatting
+ print('Error occurred while processing error payload:')
+ print(LONG_LINE)
+ print(f'Original error payload: {error_payload}')
+ print(f'Error while formatting: {str(e)}')
+ print(LONG_LINE)
+ print(f'Check logs at {DEBUG_LOG_FILEPATH} for more details')
def print_failed_requirements_checks(failed_checks: list) -> None:
@@ -313,10 +292,12 @@ def print_failed_requirements_checks(failed_checks: list) -> None:
def print_meta_info(meta_info: CliMeta) -> None:
- print(inspect.cleandoc(f"""
+ print(
+ inspect.cleandoc(f"""
{LONG_LINE}
Version: {meta_info.version}
Config Stream: {meta_info.config_stream}
Lvmpy stream: {meta_info.docker_lvmpy_stream}
{LONG_LINE}
- """))
+ """)
+ )
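
A sketch of the payload shapes print_err_response now accepts:

```python
# Each call prints a formatted error block; dicts are rendered as JSON,
# lists are joined line by line, and anything else is str()-ed.
from node_cli.utils.print_formatters import print_err_response

print_err_response('single error string')
print_err_response(['first error', 'second error'])
print_err_response({'status': 'error', 'payload': 'details'})
```
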
diff --git a/ruff.toml b/ruff.toml
new file mode 100644
index 00000000..90d9d4f6
--- /dev/null
+++ b/ruff.toml
@@ -0,0 +1,4 @@
+line-length = 100
+
+[format]
+quote-style = "single"
\ No newline at end of file
diff --git a/scripts/build.sh b/scripts/build.sh
index 3f334169..624fcdf4 100755
--- a/scripts/build.sh
+++ b/scripts/build.sh
@@ -33,19 +33,9 @@ DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
PARENT_DIR="$(dirname "$DIR")"
OS=`uname -s`-`uname -m`
-#CURRENT_BRANCH=$(git rev-parse --abbrev-ref HEAD)
-LATEST_COMMIT=$(git rev-parse HEAD)
-CURRENT_DATETIME="`date "+%Y-%m-%d %H:%M:%S"`";
-DIST_INFO_FILEPATH=$PARENT_DIR/node_cli/cli/info.py
-
-touch $DIST_INFO_FILEPATH
-
-echo "BUILD_DATETIME = '$CURRENT_DATETIME'" > $DIST_INFO_FILEPATH
-echo "COMMIT = '$LATEST_COMMIT'" >> $DIST_INFO_FILEPATH
-echo "BRANCH = '$BRANCH'" >> $DIST_INFO_FILEPATH
-echo "OS = '$OS'" >> $DIST_INFO_FILEPATH
-echo "VERSION = '$VERSION'" >> $DIST_INFO_FILEPATH
-echo "TYPE = '$TYPE'" >> $DIST_INFO_FILEPATH
+
+# Use the new generate_info.sh script
+"${DIR}/generate_info.sh" "$VERSION" "$BRANCH" "$TYPE"
if [ "$TYPE" = "sync" ]; then
EXECUTABLE_NAME=skale-$VERSION-$OS-sync
diff --git a/scripts/generate_info.sh b/scripts/generate_info.sh
new file mode 100755
index 00000000..d554712a
--- /dev/null
+++ b/scripts/generate_info.sh
@@ -0,0 +1,42 @@
+#!/usr/bin/env bash
+set -e
+
+VERSION=$1
+BRANCH=$2
+TYPE=$3
+
+USAGE_MSG='Usage: generate_info.sh VERSION BRANCH TYPE'
+
+if [ -z "$VERSION" ]; then
+ (>&2 echo 'You should provide version')
+    echo "$USAGE_MSG"
+ exit 1
+fi
+if [ -z "$BRANCH" ]; then
+ (>&2 echo 'You should provide git branch')
+    echo "$USAGE_MSG"
+ exit 1
+fi
+if [ -z "$TYPE" ]; then
+ (>&2 echo 'You should provide type: normal or sync')
+    echo "$USAGE_MSG"
+ exit 1
+fi
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
+PARENT_DIR="$(dirname "$DIR")"
+DIST_INFO_FILEPATH=$PARENT_DIR/node_cli/cli/info.py
+
+LATEST_COMMIT=$(git rev-parse HEAD)
+CURRENT_DATETIME="$(date "+%Y-%m-%d %H:%M:%S")"
+OS="$(uname -s)-$(uname -m)"
+
+rm -f "$DIST_INFO_FILEPATH"
+touch "$DIST_INFO_FILEPATH"
+
+echo "BUILD_DATETIME = '$CURRENT_DATETIME'" >> "$DIST_INFO_FILEPATH"
+echo "COMMIT = '$LATEST_COMMIT'" >> "$DIST_INFO_FILEPATH"
+echo "BRANCH = '$BRANCH'" >> "$DIST_INFO_FILEPATH"
+echo "OS = '$OS'" >> "$DIST_INFO_FILEPATH"
+echo "VERSION = '$VERSION'" >> "$DIST_INFO_FILEPATH"
+echo "TYPE = '$TYPE'" >> "$DIST_INFO_FILEPATH"
diff --git a/setup.py b/setup.py
index f335705d..02ee5fb6 100644
--- a/setup.py
+++ b/setup.py
@@ -5,14 +5,13 @@
def read(*parts):
path = os.path.join(os.path.dirname(__file__), *parts)
- f = open(path, "r")
+ f = open(path, 'r')
return f.read()
def find_version(*file_paths):
version_file = read(*file_paths)
- version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
- version_file, re.M)
+ version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Couldn't parse version from file.")
@@ -20,29 +19,28 @@ def find_version(*file_paths):
extras_require = {
'linter': [
- "flake8==7.1.1",
- "isort>=4.2.15,<5.10.2",
+ 'flake8==7.1.1',
+ 'isort>=4.2.15,<5.10.2',
+ 'ruff==0.9.9',
],
'dev': [
- "bumpversion==0.6.0",
- "pytest==8.3.2",
- "pytest-cov==5.0.0",
- "twine==4.0.2",
- "mock==4.0.3",
- "freezegun==1.2.2"
- ]
+ 'bumpversion==0.6.0',
+ 'pytest==8.3.2',
+ 'pytest-cov==5.0.0',
+ 'twine==4.0.2',
+ 'mock==4.0.3',
+ 'freezegun==1.2.2',
+ ],
}
-extras_require['dev'] = (
- extras_require['linter'] + extras_require['dev']
-)
+extras_require['dev'] = extras_require['linter'] + extras_require['dev']
setup(
name='node-cli',
# *IMPORTANT*: Don't manually change the version here.
# Use the 'bumpversion' utility instead.
- version=find_version("node_cli", "cli", "__init__.py"),
+ version=find_version('node_cli', 'cli', '__init__.py'),
include_package_data=True,
description='SKALE client tools',
long_description_markdown_filename='README.md',
@@ -50,33 +48,33 @@ def find_version(*file_paths):
author_email='support@skalelabs.com',
url='https://github.com/skalenetwork/node-cli',
install_requires=[
- "click==8.1.7",
- "PyInstaller==5.12.0",
- "distro==1.9.0",
- "docker==6.0.1",
- "texttable==1.6.7",
- "python-dateutil==2.8.2",
- "Jinja2==3.1.4",
- "psutil==5.9.4",
- "python-dotenv==0.21.0",
- "terminaltables==3.1.10",
- "requests==2.28.1",
- "GitPython==3.1.41",
- "packaging==23.0",
- "python-debian==0.1.49",
- "PyYAML==6.0",
- "pyOpenSSL==24.2.1",
- "MarkupSafe==3.0.2",
+ 'click==8.1.7',
+ 'PyInstaller==5.12.0',
+ 'distro==1.9.0',
+ 'docker==6.0.1',
+ 'texttable==1.6.7',
+ 'python-dateutil==2.8.2',
+ 'Jinja2==3.1.4',
+ 'psutil==5.9.4',
+ 'python-dotenv==0.21.0',
+ 'terminaltables==3.1.10',
+ 'requests==2.28.1',
+ 'GitPython==3.1.41',
+ 'packaging==23.0',
+ 'python-debian==0.1.49',
+ 'PyYAML==6.0',
+ 'pyOpenSSL==24.2.1',
+ 'MarkupSafe==3.0.2',
'Flask==2.3.3',
'itsdangerous==2.1.2',
- "cryptography==42.0.4",
- "filelock==3.0.12",
+ 'cryptography==42.0.4',
+ 'filelock==3.0.12',
'sh==1.14.2',
- 'python-crontab==2.6.0'
+ 'python-crontab==2.6.0',
+ 'requests-mock==1.12.1',
],
python_requires='>=3.8,<4',
extras_require=extras_require,
-
keywords=['skale', 'cli'],
packages=find_packages(exclude=['tests']),
classifiers=[
diff --git a/tests/cli/node_test.py b/tests/cli/node_test.py
index 7e206db1..9caf17d1 100644
--- a/tests/cli/node_test.py
+++ b/tests/cli/node_test.py
@@ -83,7 +83,10 @@ def test_register_node_with_error(resource_alloc, mocked_g_config):
['--name', 'test-node2', '--ip', '0.0.0.0', '--port', '80', '-d', 'skale.test'],
)
assert result.exit_code == 3
- assert (result.output == f'Command failed with following errors:\n--------------------------------------------------\nStrange error\n--------------------------------------------------\nYou can find more info in {G_CONF_HOME}.skale/.skale-cli-log/debug-node-cli.log\n') # noqa
+ assert (
+ result.output
+ == f'Command failed with following errors:\n--------------------------------------------------\nStrange error\n--------------------------------------------------\nYou can find more info in {G_CONF_HOME}.skale/.skale-cli-log/debug-node-cli.log\n' # noqa
+ )
def test_register_node_with_prompted_ip(resource_alloc, mocked_g_config):
@@ -97,7 +100,10 @@ def test_register_node_with_prompted_ip(resource_alloc, mocked_g_config):
input='0.0.0.0\n',
)
assert result.exit_code == 0
- assert (result.output == 'Enter node public IP: 0.0.0.0\nNode registered in SKALE manager.\nFor more info run < skale node info >\n') # noqa
+ assert (
+ result.output
+ == 'Enter node public IP: 0.0.0.0\nNode registered in SKALE manager.\nFor more info run < skale node info >\n' # noqa
+ )
def test_register_node_with_default_port(resource_alloc, mocked_g_config):
@@ -111,7 +117,10 @@ def test_register_node_with_default_port(resource_alloc, mocked_g_config):
input='0.0.0.0\n',
)
assert result.exit_code == 0
- assert (result.output == 'Enter node public IP: 0.0.0.0\nNode registered in SKALE manager.\nFor more info run < skale node info >\n') # noqa
+ assert (
+ result.output
+ == 'Enter node public IP: 0.0.0.0\nNode registered in SKALE manager.\nFor more info run < skale node info >\n' # noqa
+ )
def test_register_with_no_alloc(mocked_g_config):
@@ -124,7 +133,10 @@ def test_register_with_no_alloc(mocked_g_config):
input='0.0.0.0\n',
)
assert result.exit_code == 8
- assert (result.output == f"Enter node public IP: 0.0.0.0\nCommand failed with following errors:\n--------------------------------------------------\nNode hasn't been inited before.\nYou should run < skale node init >\n--------------------------------------------------\nYou can find more info in {G_CONF_HOME}.skale/.skale-cli-log/debug-node-cli.log\n") # noqa
+ assert (
+ result.output
+ == f"Enter node public IP: 0.0.0.0\nCommand failed with following errors:\n--------------------------------------------------\nNode hasn't been inited before.\nYou should run < skale node init >\n--------------------------------------------------\nYou can find more info in {G_CONF_HOME}.skale/.skale-cli-log/debug-node-cli.log\n" # noqa
+ )
def test_node_info_node_info():
@@ -149,7 +161,10 @@ def test_node_info_node_info():
resp_mock = response_mock(requests.codes.ok, json_data={'payload': payload, 'status': 'ok'})
result = run_command_mock('node_cli.utils.helper.requests.get', resp_mock, node_info)
assert result.exit_code == 0
- assert (result.output == '--------------------------------------------------\nNode info\nName: test\nID: 32\nIP: 0.0.0.0\nPublic IP: 1.1.1.1\nPort: 10001\nDomain name: skale.test\nStatus: Active\n--------------------------------------------------\n') # noqa
+ assert (
+ result.output
+ == '--------------------------------------------------\nNode info\nName: test\nID: 32\nIP: 0.0.0.0\nPublic IP: 1.1.1.1\nPort: 10001\nDomain name: skale.test\nStatus: Active\n--------------------------------------------------\n' # noqa
+ )
def test_node_info_node_info_not_created():
@@ -199,7 +214,10 @@ def test_node_info_node_info_frozen():
resp_mock = response_mock(requests.codes.ok, json_data={'payload': payload, 'status': 'ok'})
result = run_command_mock('node_cli.utils.helper.requests.get', resp_mock, node_info)
assert result.exit_code == 0
- assert (result.output == '--------------------------------------------------\nNode info\nName: test\nID: 32\nIP: 0.0.0.0\nPublic IP: 1.1.1.1\nPort: 10001\nDomain name: skale.test\nStatus: Frozen\n--------------------------------------------------\n') # noqa
+ assert (
+ result.output
+ == '--------------------------------------------------\nNode info\nName: test\nID: 32\nIP: 0.0.0.0\nPublic IP: 1.1.1.1\nPort: 10001\nDomain name: skale.test\nStatus: Frozen\n--------------------------------------------------\n' # noqa
+ )
def test_node_info_node_info_left():
@@ -224,7 +242,10 @@ def test_node_info_node_info_left():
resp_mock = response_mock(requests.codes.ok, json_data={'payload': payload, 'status': 'ok'})
result = run_command_mock('node_cli.utils.helper.requests.get', resp_mock, node_info)
assert result.exit_code == 0
- assert (result.output == '--------------------------------------------------\nNode info\nName: test\nID: 32\nIP: 0.0.0.0\nPublic IP: 1.1.1.1\nPort: 10001\nDomain name: skale.test\nStatus: Left\n--------------------------------------------------\n') # noqa
+ assert (
+ result.output
+ == '--------------------------------------------------\nNode info\nName: test\nID: 32\nIP: 0.0.0.0\nPublic IP: 1.1.1.1\nPort: 10001\nDomain name: skale.test\nStatus: Left\n--------------------------------------------------\n' # noqa
+ )
def test_node_info_node_info_leaving():
@@ -249,7 +270,10 @@ def test_node_info_node_info_leaving():
resp_mock = response_mock(requests.codes.ok, json_data={'payload': payload, 'status': 'ok'})
result = run_command_mock('node_cli.utils.helper.requests.get', resp_mock, node_info)
assert result.exit_code == 0
- assert (result.output == '--------------------------------------------------\nNode info\nName: test\nID: 32\nIP: 0.0.0.0\nPublic IP: 1.1.1.1\nPort: 10001\nDomain name: skale.test\nStatus: Leaving\n--------------------------------------------------\n') # noqa
+ assert (
+ result.output
+ == '--------------------------------------------------\nNode info\nName: test\nID: 32\nIP: 0.0.0.0\nPublic IP: 1.1.1.1\nPort: 10001\nDomain name: skale.test\nStatus: Leaving\n--------------------------------------------------\n' # noqa
+ )
def test_node_info_node_info_in_maintenance():
@@ -274,7 +298,10 @@ def test_node_info_node_info_in_maintenance():
resp_mock = response_mock(requests.codes.ok, json_data={'payload': payload, 'status': 'ok'})
result = run_command_mock('node_cli.utils.helper.requests.get', resp_mock, node_info)
assert result.exit_code == 0
- assert (result.output == '--------------------------------------------------\nNode info\nName: test\nID: 32\nIP: 0.0.0.0\nPublic IP: 1.1.1.1\nPort: 10001\nDomain name: skale.test\nStatus: In Maintenance\n--------------------------------------------------\n') # noqa
+ assert (
+ result.output
+ == '--------------------------------------------------\nNode info\nName: test\nID: 32\nIP: 0.0.0.0\nPublic IP: 1.1.1.1\nPort: 10001\nDomain name: skale.test\nStatus: In Maintenance\n--------------------------------------------------\n' # noqa
+ )
def test_node_signature():
@@ -301,14 +328,18 @@ def test_restore(mocked_g_config):
'\n', ''
)
- with patch('node_cli.core.node.restore_op', MagicMock()) as mock_restore_op, patch(
- 'subprocess.run', new=subprocess_run_mock
- ), patch('node_cli.core.resources.get_disk_size', return_value=BIG_DISK_SIZE), patch(
- 'node_cli.utils.decorators.is_node_inited', return_value=False
- ), patch(
- 'node_cli.core.node.get_meta_info',
- return_value=CliMeta(version='2.4.0', config_stream='3.0.2'),
- ), patch('node_cli.operations.base.configure_nftables'):
+ with (
+ patch('node_cli.core.node.restore_op', MagicMock()) as mock_restore_op,
+ patch('subprocess.run', new=subprocess_run_mock),
+ patch('node_cli.core.resources.get_disk_size', return_value=BIG_DISK_SIZE),
+ patch('node_cli.utils.decorators.is_node_inited', return_value=False),
+ patch(
+ 'node_cli.core.node.get_meta_info',
+ return_value=CliMeta(version='2.4.0', config_stream='3.0.2'),
+ ),
+ patch('node_cli.operations.base.configure_nftables'),
+ patch('node_cli.configs.env.validate_params', lambda params: None),
+ ):
result = run_command(restore_node, [backup_path, './tests/test-env'])
assert result.exit_code == 0
assert 'Node is restored from backup\n' in result.output # noqa
@@ -323,14 +354,18 @@ def test_restore_no_snapshot(mocked_g_config):
'\n', ''
)
- with patch('node_cli.core.node.restore_op', MagicMock()) as mock_restore_op, patch(
- 'subprocess.run', new=subprocess_run_mock
- ), patch('node_cli.core.resources.get_disk_size', return_value=BIG_DISK_SIZE), patch(
- 'node_cli.utils.decorators.is_node_inited', return_value=False
- ), patch(
- 'node_cli.core.node.get_meta_info',
- return_value=CliMeta(version='2.4.0', config_stream='3.0.2'),
- ), patch('node_cli.operations.base.configure_nftables'):
+ with (
+ patch('node_cli.core.node.restore_op', MagicMock()) as mock_restore_op,
+ patch('subprocess.run', new=subprocess_run_mock),
+ patch('node_cli.core.resources.get_disk_size', return_value=BIG_DISK_SIZE),
+ patch('node_cli.utils.decorators.is_node_inited', return_value=False),
+ patch(
+ 'node_cli.core.node.get_meta_info',
+ return_value=CliMeta(version='2.4.0', config_stream='3.0.2'),
+ ),
+ patch('node_cli.operations.base.configure_nftables'),
+ patch('node_cli.configs.env.validate_params', lambda params: None),
+ ):
result = run_command(restore_node, [backup_path, './tests/test-env', '--no-snapshot'])
assert result.exit_code == 0
assert 'Node is restored from backup\n' in result.output # noqa
@@ -347,7 +382,7 @@ def test_maintenance_on():
assert (
result.output
== 'Setting maintenance mode on...\nNode is successfully set in maintenance mode\n'
- ) # noqa
+ )
def test_maintenance_off(mocked_g_config):
@@ -359,14 +394,17 @@ def test_maintenance_off(mocked_g_config):
assert (
result.output
== 'Setting maintenance mode off...\nNode is successfully removed from maintenance mode\n'
- ) # noqa
+ )
def test_turn_off_maintenance_on(mocked_g_config):
resp_mock = response_mock(requests.codes.ok, {'status': 'ok', 'payload': None})
- with mock.patch('subprocess.run', new=subprocess_run_mock), mock.patch(
- 'node_cli.core.node.turn_off_op'
- ), mock.patch('node_cli.utils.decorators.is_node_inited', return_value=True):
+ with (
+ mock.patch('subprocess.run', new=subprocess_run_mock),
+ mock.patch('node_cli.core.node.turn_off_op'),
+ mock.patch('node_cli.utils.decorators.is_node_inited', return_value=True),
+ patch('node_cli.configs.env.validate_params', lambda params: None),
+ ):
result = run_command_mock(
'node_cli.utils.helper.requests.post',
resp_mock,
@@ -376,11 +414,9 @@ def test_turn_off_maintenance_on(mocked_g_config):
assert (
result.output
== 'Setting maintenance mode on...\nNode is successfully set in maintenance mode\n'
- ) # noqa
+ )
assert result.exit_code == 0
- with mock.patch(
- 'node_cli.utils.docker_utils.is_container_running', return_value=True
- ):
+ with mock.patch('node_cli.utils.docker_utils.is_container_running', return_value=True):
result = run_command_mock(
'node_cli.utils.helper.requests.post',
resp_mock,
@@ -393,11 +429,14 @@ def test_turn_off_maintenance_on(mocked_g_config):
def test_turn_on_maintenance_off(mocked_g_config):
resp_mock = response_mock(requests.codes.ok, {'status': 'ok', 'payload': None})
- with mock.patch('subprocess.run', new=subprocess_run_mock), mock.patch(
- 'node_cli.core.node.get_flask_secret_key'
- ), mock.patch('node_cli.core.node.turn_on_op'), mock.patch(
- 'node_cli.core.node.is_base_containers_alive'
- ), mock.patch('node_cli.utils.decorators.is_node_inited', return_value=True):
+ with (
+ mock.patch('subprocess.run', new=subprocess_run_mock),
+ mock.patch('node_cli.core.node.get_flask_secret_key'),
+ mock.patch('node_cli.core.node.turn_on_op'),
+ mock.patch('node_cli.core.node.is_base_containers_alive'),
+ mock.patch('node_cli.utils.decorators.is_node_inited', return_value=True),
+ patch('node_cli.configs.env.validate_params', lambda params: None),
+ ):
result = run_command_mock(
'node_cli.utils.helper.requests.post',
resp_mock,
@@ -409,7 +448,7 @@ def test_turn_on_maintenance_off(mocked_g_config):
assert (
result.output
== 'Setting maintenance mode off...\nNode is successfully removed from maintenance mode\n'
- ) # noqa, tmp fix
+ )
def test_set_domain_name():
@@ -425,18 +464,21 @@ def test_set_domain_name():
assert result.exit_code == 0
assert (
result.output == 'Setting new domain name: skale.test\nDomain name successfully changed\n'
- ) # noqa
+ )
def test_node_version(meta_file_v2):
with mock.patch('node_cli.utils.decorators.is_node_inited', return_value=True):
result = run_command(version)
assert result.exit_code == 0
- assert (result.output == '--------------------------------------------------\nVersion: 0.1.1\nConfig Stream: develop\nLvmpy stream: 1.1.2\n--------------------------------------------------\n') # noqa
+ assert (
+ result.output
+ == '--------------------------------------------------\nVersion: 0.1.1\nConfig Stream: develop\nLvmpy stream: 1.1.2\n--------------------------------------------------\n' # noqa
+ )
result = run_command(version, ['--json'])
assert result.exit_code == 0
assert (
result.output
== "{'version': '0.1.1', 'config_stream': 'develop', 'docker_lvmpy_stream': '1.1.2'}\n"
- ) # noqa
+ )
diff --git a/tests/cli/resources_allocation_test.py b/tests/cli/resources_allocation_test.py
index d84d9fc3..8836435f 100644
--- a/tests/cli/resources_allocation_test.py
+++ b/tests/cli/resources_allocation_test.py
@@ -24,9 +24,7 @@
import pytest
-from node_cli.configs.resource_allocation import (
- RESOURCE_ALLOCATION_FILEPATH, NODE_DATA_PATH
-)
+from node_cli.configs.resource_allocation import RESOURCE_ALLOCATION_FILEPATH, NODE_DATA_PATH
from node_cli.utils.helper import safe_mkdir, write_json
from tests.helper import response_mock, run_command_mock
@@ -48,11 +46,7 @@ def resource_alloc_config():
def test_show(resource_alloc_config):
resp_mock = response_mock(requests.codes.created)
write_json(RESOURCE_ALLOCATION_FILEPATH, TEST_CONFIG)
- result = run_command_mock(
- 'node_cli.utils.helper.post_request',
- resp_mock,
- show
- )
+ result = run_command_mock('node_cli.utils.helper.post_request', resp_mock, show)
assert result.output == json.dumps(TEST_CONFIG, indent=4) + '\n'
assert result.exit_code == 0
@@ -60,40 +54,38 @@ def test_show(resource_alloc_config):
def test_generate():
safe_mkdir(NODE_DATA_PATH)
resp_mock = response_mock(requests.codes.created)
- with mock.patch('node_cli.core.resources.get_disk_size',
- return_value=BIG_DISK_SIZE):
+ with (
+ mock.patch('node_cli.core.resources.get_disk_size', return_value=BIG_DISK_SIZE),
+ mock.patch('node_cli.configs.env.validate_params', lambda params: None),
+ ):
result = run_command_mock(
- 'node_cli.utils.helper.post_request',
- resp_mock,
- generate,
- ['./tests/test-env', '--yes']
+ 'node_cli.utils.helper.post_request', resp_mock, generate, ['./tests/test-env', '--yes']
)
- assert result.output == (f'Resource allocation file generated: '
- f'{RESOURCE_ALLOCATION_FILEPATH}\n')
+ assert result.output == (
+ f'Resource allocation file generated: {RESOURCE_ALLOCATION_FILEPATH}\n'
+ )
assert result.exit_code == 0
def test_generate_already_exists(resource_alloc_config):
resp_mock = response_mock(requests.codes.created)
- with mock.patch('node_cli.core.resources.get_disk_size',
- return_value=BIG_DISK_SIZE):
+ with (
+ mock.patch('node_cli.core.resources.get_disk_size', return_value=BIG_DISK_SIZE),
+ mock.patch('node_cli.configs.env.validate_params', lambda params: None),
+ ):
result = run_command_mock(
- 'node_cli.utils.helper.post_request',
- resp_mock,
- generate,
- ['./tests/test-env', '--yes']
+ 'node_cli.utils.helper.post_request', resp_mock, generate, ['./tests/test-env', '--yes']
)
assert result.output == 'Resource allocation file is already exists\n'
assert result.exit_code == 0
result = run_command_mock(
- 'node_cli.utils.helper.post_request',
- resp_mock,
- generate,
- ['./tests/test-env', '--yes', '--force']
+ 'node_cli.utils.helper.post_request',
+ resp_mock,
+ generate,
+ ['./tests/test-env', '--yes', '--force'],
)
assert result.output == (
- f'Resource allocation file generated: '
- f'{RESOURCE_ALLOCATION_FILEPATH}\n'
+ f'Resource allocation file generated: {RESOURCE_ALLOCATION_FILEPATH}\n'
)
assert result.exit_code == 0
diff --git a/tests/cli/sync_node_test.py b/tests/cli/sync_node_test.py
index 3465bfc8..76d50648 100644
--- a/tests/cli/sync_node_test.py
+++ b/tests/cli/sync_node_test.py
@@ -35,14 +35,16 @@
init_default_logger()
-def test_init_sync(mocked_g_config):
+def test_init_sync(mocked_g_config, clean_node_options):
pathlib.Path(SKALE_DIR).mkdir(parents=True, exist_ok=True)
- with mock.patch('subprocess.run', new=subprocess_run_mock), mock.patch(
- 'node_cli.core.node.init_sync_op'
- ), mock.patch('node_cli.core.node.is_base_containers_alive', return_value=True), mock.patch(
- 'node_cli.core.resources.get_disk_size', return_value=BIG_DISK_SIZE
- ), mock.patch('node_cli.operations.base.configure_nftables'), mock.patch(
- 'node_cli.utils.decorators.is_node_inited', return_value=False
+ with (
+ mock.patch('subprocess.run', new=subprocess_run_mock),
+ mock.patch('node_cli.core.node.init_sync_op'),
+ mock.patch('node_cli.core.node.is_base_containers_alive', return_value=True),
+ mock.patch('node_cli.core.resources.get_disk_size', return_value=BIG_DISK_SIZE),
+ mock.patch('node_cli.operations.base.configure_nftables'),
+ mock.patch('node_cli.utils.decorators.is_node_inited', return_value=False),
+ mock.patch('node_cli.configs.env.validate_params', lambda params: None),
):
result = run_command(_init_sync, ['./tests/test-env'])
@@ -57,28 +59,27 @@ def test_init_sync(mocked_g_config):
def test_init_sync_archive(mocked_g_config, clean_node_options):
pathlib.Path(NODE_DATA_PATH).mkdir(parents=True, exist_ok=True)
# with mock.patch('subprocess.run', new=subprocess_run_mock), \
- with mock.patch('node_cli.core.node.is_base_containers_alive', return_value=True), mock.patch(
- 'node_cli.operations.base.cleanup_volume_artifacts'
- ), mock.patch('node_cli.operations.base.download_skale_node'), mock.patch(
- 'node_cli.operations.base.sync_skale_node'
- ), mock.patch('node_cli.operations.base.configure_docker'), mock.patch(
- 'node_cli.operations.base.prepare_host'
- ), mock.patch('node_cli.operations.base.ensure_filestorage_mapping'), mock.patch(
- 'node_cli.operations.base.link_env_file'
- ), mock.patch('node_cli.operations.base.download_contracts'), mock.patch(
- 'node_cli.operations.base.generate_nginx_config'
- ), mock.patch('node_cli.operations.base.prepare_block_device'), mock.patch(
- 'node_cli.operations.base.update_meta'
- ), mock.patch('node_cli.operations.base.update_resource_allocation'), mock.patch(
- 'node_cli.operations.base.update_images'
- ), mock.patch('node_cli.operations.base.compose_up'), mock.patch(
- 'node_cli.core.resources.get_disk_size', return_value=BIG_DISK_SIZE
- ), mock.patch('node_cli.operations.base.configure_nftables'), mock.patch(
- 'node_cli.utils.decorators.is_node_inited', return_value=False
+ with (
+ mock.patch('node_cli.core.node.is_base_containers_alive', return_value=True),
+ mock.patch('node_cli.operations.base.cleanup_volume_artifacts'),
+ mock.patch('node_cli.operations.base.download_skale_node'),
+ mock.patch('node_cli.operations.base.sync_skale_node'),
+ mock.patch('node_cli.operations.base.configure_docker'),
+ mock.patch('node_cli.operations.base.prepare_host'),
+ mock.patch('node_cli.operations.base.ensure_filestorage_mapping'),
+ mock.patch('node_cli.operations.base.link_env_file'),
+ mock.patch('node_cli.operations.base.generate_nginx_config'),
+ mock.patch('node_cli.operations.base.prepare_block_device'),
+ mock.patch('node_cli.operations.base.update_meta'),
+ mock.patch('node_cli.operations.base.update_resource_allocation'),
+ mock.patch('node_cli.operations.base.update_images'),
+ mock.patch('node_cli.operations.base.compose_up'),
+ mock.patch('node_cli.core.resources.get_disk_size', return_value=BIG_DISK_SIZE),
+ mock.patch('node_cli.operations.base.configure_nftables'),
+ mock.patch('node_cli.utils.decorators.is_node_inited', return_value=False),
+ mock.patch('node_cli.configs.env.validate_params', lambda params: None),
):
- result = run_command(
- _init_sync, ['./tests/test-env', '--archive', '--historic-state']
- )
+ result = run_command(_init_sync, ['./tests/test-env', '--archive', '--historic-state'])
node_options = NodeOptions()
assert node_options.archive
@@ -90,12 +91,13 @@ def test_init_sync_archive(mocked_g_config, clean_node_options):
def test_init_sync_historic_state_fail(mocked_g_config, clean_node_options):
pathlib.Path(SKALE_DIR).mkdir(parents=True, exist_ok=True)
- with mock.patch('subprocess.run', new=subprocess_run_mock), mock.patch(
- 'node_cli.core.node.init_sync_op'
- ), mock.patch('node_cli.core.node.is_base_containers_alive', return_value=True), mock.patch(
- 'node_cli.core.resources.get_disk_size', return_value=BIG_DISK_SIZE
- ), mock.patch('node_cli.operations.base.configure_nftables'), mock.patch(
- 'node_cli.utils.decorators.is_node_inited', return_value=False
+ with (
+ mock.patch('subprocess.run', new=subprocess_run_mock),
+ mock.patch('node_cli.core.node.init_sync_op'),
+ mock.patch('node_cli.core.node.is_base_containers_alive', return_value=True),
+ mock.patch('node_cli.core.resources.get_disk_size', return_value=BIG_DISK_SIZE),
+ mock.patch('node_cli.operations.base.configure_nftables'),
+ mock.patch('node_cli.utils.decorators.is_node_inited', return_value=False),
):
result = run_command(_init_sync, ['./tests/test-env', '--historic-state'])
assert result.exit_code == 1
@@ -105,15 +107,18 @@ def test_init_sync_historic_state_fail(mocked_g_config, clean_node_options):
def test_update_sync(mocked_g_config):
pathlib.Path(SKALE_DIR).mkdir(parents=True, exist_ok=True)
- with mock.patch('subprocess.run', new=subprocess_run_mock), mock.patch(
- 'node_cli.core.node.update_sync_op'
- ), mock.patch('node_cli.core.node.is_base_containers_alive', return_value=True), mock.patch(
- 'node_cli.core.resources.get_disk_size', return_value=BIG_DISK_SIZE
- ), mock.patch('node_cli.operations.base.configure_nftables'), mock.patch(
- 'node_cli.utils.decorators.is_node_inited', return_value=True
- ), mock.patch(
- 'node_cli.core.node.get_meta_info',
- return_value=CliMeta(version='2.6.0', config_stream='3.0.2')
+ with (
+ mock.patch('subprocess.run', new=subprocess_run_mock),
+ mock.patch('node_cli.core.node.update_sync_op'),
+ mock.patch('node_cli.core.node.is_base_containers_alive', return_value=True),
+ mock.patch('node_cli.core.resources.get_disk_size', return_value=BIG_DISK_SIZE),
+ mock.patch('node_cli.operations.base.configure_nftables'),
+ mock.patch('node_cli.utils.decorators.is_node_inited', return_value=True),
+ mock.patch(
+ 'node_cli.core.node.get_meta_info',
+ return_value=CliMeta(version='2.6.0', config_stream='3.0.2'),
+ ),
+ mock.patch('node_cli.configs.env.validate_params', lambda params: None),
):
result = run_command(_update_sync, ['./tests/test-env', '--yes'])
assert result.exit_code == 0
diff --git a/tests/cli/validate_test.py b/tests/cli/validate_test.py
deleted file mode 100644
index 7a595b87..00000000
--- a/tests/cli/validate_test.py
+++ /dev/null
@@ -1,67 +0,0 @@
-import json
-import pathlib
-import shutil
-
-import pytest
-
-from node_cli.configs import (CONTRACTS_PATH, G_CONF_HOME,
- IMA_CONTRACTS_FILEPATH, MANAGER_CONTRACTS_FILEPATH)
-from node_cli.cli.validate import abi
-from tests.helper import run_command
-
-
-@pytest.fixture
-def contracts_info_dir():
- pathlib.Path(CONTRACTS_PATH).mkdir(parents=True, exist_ok=True)
- yield CONTRACTS_PATH
- shutil.rmtree(CONTRACTS_PATH)
-
-
-@pytest.fixture
-def contract_valid_abi_files(contracts_info_dir):
- json_data = {'test': 'abi'}
- with open(IMA_CONTRACTS_FILEPATH, 'w') as ima_abi_file:
- json.dump(json_data, ima_abi_file)
- with open(MANAGER_CONTRACTS_FILEPATH, 'w') as manager_abi_file:
- json.dump(json_data, manager_abi_file)
- yield IMA_CONTRACTS_FILEPATH, MANAGER_CONTRACTS_FILEPATH
-
-
-@pytest.fixture
-def contract_abi_file_invalid(contracts_info_dir):
- json_data = {'test': 'abi'}
- with open(IMA_CONTRACTS_FILEPATH, 'w') as ima_abi_file:
- json.dump(json_data, ima_abi_file)
- with open(MANAGER_CONTRACTS_FILEPATH, 'w') as manager_abi_file:
- manager_abi_file.write('Invalid json')
- yield IMA_CONTRACTS_FILEPATH, MANAGER_CONTRACTS_FILEPATH
-
-
-@pytest.fixture
-def contract_abi_file_empty(contracts_info_dir):
- json_data = {'test': 'abi'}
- with open(IMA_CONTRACTS_FILEPATH, 'w') as ima_abi_file:
- json.dump(json_data, ima_abi_file)
- yield IMA_CONTRACTS_FILEPATH, MANAGER_CONTRACTS_FILEPATH
-
-
-def test_validate_abi(contract_valid_abi_files):
- result = run_command(abi)
- assert result.output == 'All abi files are correct json files!\n'
- assert result.exit_code == 0
-
-
-def test_validate_abi_invalid_file(contract_abi_file_invalid):
- result = run_command(abi)
- assert 'Some files do not exist or are incorrect' in result.output
- assert f'{G_CONF_HOME}.skale/contracts_info/manager.json error Failed to load abi file as json' in result.output # noqa
- assert f'{G_CONF_HOME}.skale/contracts_info/ima.json ok' in result.output
- assert result.exit_code == 0
-
-
-def test_validate_abi_empty_file(contract_abi_file_empty):
- result = run_command(abi)
- assert 'Some files do not exist or are incorrect' in result.output
- assert f'{G_CONF_HOME}.skale/contracts_info/manager.json error No such file' in result.output # noqa
- assert f'{G_CONF_HOME}.skale/contracts_info/ima.json ok' in result.output
- assert result.exit_code == 0
diff --git a/tests/configs/configs_env_validate_test.py b/tests/configs/configs_env_validate_test.py
new file mode 100644
index 00000000..5e0bf3e9
--- /dev/null
+++ b/tests/configs/configs_env_validate_test.py
@@ -0,0 +1,323 @@
+import os
+from typing import Optional
+import pytest
+import requests
+
+from node_cli.configs.env import (
+ absent_params,
+ load_env_file,
+ build_params,
+ populate_params,
+ get_env_config,
+ validate_params,
+ validate_env_type,
+ validate_env_alias_or_address,
+ validate_contract_address,
+ validate_contract_alias,
+ get_chain_id,
+ get_network_metadata,
+ ContractType,
+ ALLOWED_ENV_TYPES,
+)
+from node_cli.utils.exit_codes import CLIExitCodes
+
+
+# =============================================================================
+# Helper fake response for patching requests.get in network helpers
+# =============================================================================
+class FakeResponse:
+ def __init__(self, status_code: int, json_data: Optional[dict] = None):
+ self.status_code = status_code
+ self._json_data = json_data or {}
+
+ def json(self):
+ return self._json_data
+
+
+# =============================================================================
+# Tests for absent_params
+# =============================================================================
+class TestAbsentParams:
+ def test_absent_params_returns_missing_keys(self):
+ params = {
+ 'A': '', # missing
+ 'B': 'value',
+ 'C': '', # missing
+ 'MONITORING_CONTAINERS': 'optional',
+ }
+ missing = absent_params(params)
+        # A and C have empty values, so they should be reported as absent
+        # (assuming absent_params treats them as required)
+ assert 'A' in missing
+ assert 'C' in missing
+        # MONITORING_CONTAINERS is optional and carries a value, so it must not be flagged
+ assert 'MONITORING_CONTAINERS' not in missing
+
+
+# =============================================================================
+# Tests for file loading
+# =============================================================================
+class TestLoadEnvFile:
+ def test_load_env_file_nonexistent(self):
+ with pytest.raises(SystemExit) as excinfo:
+ load_env_file('nonexistent.env')
+ assert excinfo.value.code == CLIExitCodes.FAILURE.value
+
+ def test_load_env_file_not_readable(self, tmp_path):
+ # Create a temporary file and remove read permissions
+ env_file = tmp_path / 'test.env'
+ env_file.write_text('KEY=value')
+ os.chmod(env_file, 0o000)
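+        # NB: chmod(0o000) does not stop root from reading the file, so this
+        # check assumes the test suite is not running as root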
+ with pytest.raises(SystemExit) as excinfo:
+ load_env_file(str(env_file))
+ assert excinfo.value.code == CLIExitCodes.FAILURE.value
+ os.chmod(env_file, 0o644) # reset permissions
+
+
+# =============================================================================
+# Tests for building and populating parameters
+# =============================================================================
+class TestBuildAndPopulate:
+ def test_build_params_sync(self):
+ params = build_params(sync_node=True)
+ # Should contain SCHAIN_NAME among required keys.
+ assert 'SCHAIN_NAME' in params
+
+ def test_build_params_non_sync(self):
+ params = build_params(sync_node=False)
+        # Should not contain SCHAIN_NAME (it belongs to the sync-only params)
+ assert 'SCHAIN_NAME' not in params
+
+ def test_populate_params_updates_from_environ(self, monkeypatch):
+ # Start with a base dictionary.
+ params = {'FOO': ''}
+ monkeypatch.setenv('FOO', 'bar')
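+        # populate_params is expected to fill the dict in place from os.environ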
+ populate_params(params)
+ assert params['FOO'] == 'bar'
+
+
+# =============================================================================
+# Tests for validate_env_type
+# =============================================================================
+class TestEnvType:
+ @pytest.mark.parametrize('env_type', ['mainnet', 'testnet', 'qanet', 'devnet'])
+ def test_valid_env_types(self, env_type):
+ # Should pass without exiting
+ validate_env_type(env_type)
+
+ def test_invalid_env_type(self):
+ with pytest.raises(SystemExit) as excinfo:
+ validate_env_type('invalid')
+ assert excinfo.value.code == CLIExitCodes.FAILURE.value
+
+
+# =============================================================================
+# Tests for network helper functions
+# =============================================================================
+class TestNetworkHelpers:
+ def test_get_chain_id_success(self, monkeypatch):
+ fake_response = FakeResponse(200, {'result': '0x1'})
+
+ def fake_post(url, json):
+ return fake_response
+
+ monkeypatch.setattr(requests, 'post', fake_post)
+ chain_id = get_chain_id('http://localhost:8545')
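+        # eth_chainId responses are hex strings; get_chain_id is expected to
+        # decode them, so '0x1' comes back as the integer 1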
+ assert chain_id == 1
+
+ def test_get_chain_id_failure(self, monkeypatch):
+ fake_response = FakeResponse(404)
+
+ def fake_post(url, json):
+ return fake_response
+
+ monkeypatch.setattr(requests, 'post', fake_post)
+ with pytest.raises(SystemExit) as excinfo:
+ get_chain_id('http://localhost:8545')
+ assert excinfo.value.code == CLIExitCodes.FAILURE.value
+
+ def test_get_network_metadata_success(self, requests_mock):
+ metadata = {'networks': [{'chainId': 1, 'path': 'mainnet'}]}
+ metadata_url = (
+ 'https://raw.githubusercontent.com/skalenetwork/skale-contracts/'
+ 'refs/heads/deployments/metadata.json'
+ )
+ requests_mock.get(metadata_url, json=metadata, status_code=200)
+ result = get_network_metadata()
+ assert result == metadata
+
+ def test_get_network_metadata_failure(self, requests_mock):
+ metadata_url = (
+ 'https://raw.githubusercontent.com/skalenetwork/skale-contracts/'
+ 'refs/heads/deployments/metadata.json'
+ )
+ requests_mock.get(metadata_url, status_code=404)
+ with pytest.raises(SystemExit) as excinfo:
+ get_network_metadata()
+ assert excinfo.value.code == CLIExitCodes.FAILURE.value
+
+
+# =============================================================================
+# Tests for contract validations
+# =============================================================================
+class TestContractValidation:
+ def test_validate_contract_address_success(self, requests_mock):
+ # Simulate a valid contract code response.
+ endpoint = 'http://localhost:8545'
+ requests_mock.post(endpoint, json={'result': '0x123'})
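+        # eth_getCode returns the deployed bytecode; anything other than the
+        # empty '0x' means code exists at the address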
+ # This call should not exit.
+ validate_contract_address('0x' + 'a' * 40, endpoint)
+
+ def test_validate_contract_address_no_code(self, requests_mock):
+ endpoint = 'http://localhost:8545'
+ requests_mock.post(endpoint, json={'result': '0x'})
+ with pytest.raises(SystemExit) as excinfo:
+ validate_contract_address('0x' + 'a' * 40, endpoint)
+ assert excinfo.value.code == CLIExitCodes.FAILURE.value
+
+ def test_validate_contract_alias_success(self, requests_mock):
+ endpoint = 'http://localhost:8545'
+ # Fake chain ID response.
+ requests_mock.post(endpoint, json={'result': '0x1'})
+ # Fake metadata response.
+ metadata_url = (
+ 'https://raw.githubusercontent.com/skalenetwork/skale-contracts/'
+ 'refs/heads/deployments/metadata.json'
+ )
+ metadata = {'networks': [{'chainId': 1, 'path': 'mainnet'}]}
+ requests_mock.get(metadata_url, json=metadata, status_code=200)
+ # Fake deployment URL response.
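+        # (the URL below is presumably assembled from the network path in the
+        # metadata, the per-contract-type repo name and the alias)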
+ alias_url = (
+ 'https://raw.githubusercontent.com/skalenetwork/skale-contracts/'
+ 'refs/heads/deployments/mainnet/skale-manager/test-alias.json'
+ )
+ requests_mock.get(alias_url, status_code=200)
+ validate_contract_alias('test-alias', ContractType.MANAGER, endpoint)
+
+ def test_validate_contract_alias_network_missing(self, requests_mock):
+ endpoint = 'http://localhost:8545'
+ requests_mock.post(endpoint, json={'result': '0x1'})
+ metadata_url = (
+ 'https://raw.githubusercontent.com/skalenetwork/skale-contracts/'
+ 'refs/heads/deployments/metadata.json'
+ )
+ # Return empty networks list.
+ requests_mock.get(metadata_url, json={'networks': []}, status_code=200)
+ with pytest.raises(SystemExit) as excinfo:
+ validate_contract_alias('test-alias', ContractType.MANAGER, endpoint)
+ assert excinfo.value.code == CLIExitCodes.FAILURE.value
+
+
+# =============================================================================
+# Tests for validate_env_alias_or_address and validate_params
+# =============================================================================
+class TestEnvAliasAndParams:
+ def test_validate_env_alias_or_address_with_address(self, requests_mock):
+ endpoint = 'http://localhost:8545'
+ # Provide a fake contract address: 42 characters starting with '0x'
+ addr = '0x' + 'b' * 40
+        # Mock the RPC endpoint so eth_getCode reports code at the address
+ requests_mock.post(endpoint, json={'result': '0x1'})
+ validate_env_alias_or_address(addr, ContractType.IMA, endpoint)
+
+ def test_validate_env_alias_or_address_with_alias(self, requests_mock):
+ endpoint = 'http://localhost:8545'
+        # For an alias, simulate every step of the lookup succeeding.
+ # Fake chain ID response:
+ requests_mock.post(endpoint, json={'result': '0x1'})
+ metadata_url = (
+ 'https://raw.githubusercontent.com/skalenetwork/skale-contracts/'
+ 'refs/heads/deployments/metadata.json'
+ )
+ metadata = {'networks': [{'chainId': 1, 'path': 'mainnet'}]}
+ requests_mock.get(metadata_url, json=metadata, status_code=200)
+ # Fake deployment response.
+ alias_url = (
+ 'https://raw.githubusercontent.com/skalenetwork/skale-contracts/'
+ 'refs/heads/deployments/mainnet/mainnet-ima/test-alias.json'
+ )
+ requests_mock.get(alias_url, status_code=200)
+ validate_env_alias_or_address('test-alias', ContractType.IMA, endpoint)
+
+ def test_validate_params_missing_key(self):
+ # Create a dictionary missing one required key.
+ populated_params = {
+ 'CONTAINER_CONFIGS_STREAM': 'value',
+ 'ENDPOINT': 'http://localhost:8545',
+ 'MANAGER_CONTRACTS': '',
+ 'FILEBEAT_HOST': '127.0.0.1:3010',
+ 'DISK_MOUNTPOINT': '/dev/sss',
+ 'SGX_SERVER_URL': 'http://127.0.0.1',
+ 'DOCKER_LVMPY_STREAM': 'value',
+ 'ENV_TYPE': 'mainnet',
+ }
+ with pytest.raises(SystemExit) as excinfo:
+ validate_params(populated_params)
+ assert excinfo.value.code == CLIExitCodes.FAILURE.value
+
+ def test_validate_params_success(self, valid_env_params, requests_mock):
+ endpoint = valid_env_params['ENDPOINT']
+ # Fake chain ID response.
+ requests_mock.post(endpoint, json={'result': '0x1'})
+ # Fake metadata response.
+ metadata_url = (
+ 'https://raw.githubusercontent.com/skalenetwork/skale-contracts/'
+ 'refs/heads/deployments/metadata.json'
+ )
+ metadata = {'networks': [{'chainId': 1, 'path': 'mainnet'}]}
+ requests_mock.get(metadata_url, json=metadata, status_code=200)
+ ima_alias_url = (
+ 'https://raw.githubusercontent.com/skalenetwork/skale-contracts/'
+ 'refs/heads/deployments/mainnet/mainnet-ima/test-ima.json'
+ )
+ requests_mock.get(ima_alias_url, status_code=200)
+ manager_alias_url = (
+ 'https://raw.githubusercontent.com/skalenetwork/skale-contracts/'
+ 'refs/heads/deployments/mainnet/skale-manager/test-manager.json'
+ )
+ requests_mock.get(manager_alias_url, status_code=200)
+ # Should not exit.
+ validate_params(valid_env_params)
+
+
+# =============================================================================
+# Tests for get_env_config
+# =============================================================================
+class TestGetEnvConfig:
+ def test_get_env_config_success(
+ self, valid_env_file, mock_chain_response, mock_networks_metadata, requests_mock
+ ):
+ endpoint = 'http://localhost:8545'
+ # Patch network calls used in validation
+ requests_mock.post(endpoint, json=mock_chain_response)
+ metadata_url = (
+ 'https://raw.githubusercontent.com/skalenetwork/skale-contracts/'
+ 'refs/heads/deployments/metadata.json'
+ )
+ requests_mock.get(metadata_url, json=mock_networks_metadata, status_code=200)
+ ima_alias_url = (
+ 'https://raw.githubusercontent.com/skalenetwork/skale-contracts/'
+ 'refs/heads/deployments/mainnet/mainnet-ima/test-ima.json'
+ )
+ requests_mock.get(ima_alias_url, status_code=200)
+ manager_alias_url = (
+ 'https://raw.githubusercontent.com/skalenetwork/skale-contracts/'
+ 'refs/heads/deployments/mainnet/skale-manager/test-manager.json'
+ )
+ requests_mock.get(manager_alias_url, status_code=200)
+ config = get_env_config(valid_env_file)
+        # Values from the env file should be carried through as strings
+ assert config['ENDPOINT'] == 'http://localhost:8545'
+ # Also check that ENV_TYPE is one of the allowed ones
+ assert config['ENV_TYPE'] in ALLOWED_ENV_TYPES
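+        # get_env_config is expected to load the file, populate values from
+        # the process environment and run the same checks as validate_params,
+        # which is why the network endpoints above are mocked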
+
+ def test_get_env_config_missing_file(self):
+ with pytest.raises(SystemExit) as excinfo:
+ get_env_config('nonexistent.env')
+ assert excinfo.value.code == CLIExitCodes.FAILURE.value
+
+ def test_get_env_config_unreadable_file(self, valid_env_file):
+ os.chmod(valid_env_file, 0o000)
+ with pytest.raises(SystemExit) as excinfo:
+ get_env_config(valid_env_file)
+ assert excinfo.value.code == CLIExitCodes.FAILURE.value
+ os.chmod(valid_env_file, 0o644)
diff --git a/tests/configs_env_test.py b/tests/configs_env_test.py
deleted file mode 100644
index 1fe9ac4e..00000000
--- a/tests/configs_env_test.py
+++ /dev/null
@@ -1,15 +0,0 @@
-from node_cli.configs.env import NotValidEnvParamsError, validate_params
-
-
-def test_validate_params():
- valid_config = {'ENV_TYPE': 'mainnet'}
- validate_params(valid_config)
- invalid_config = {'ENV_TYPE': ''}
- error = None
- try:
- validate_params(invalid_config)
- except NotValidEnvParamsError as e:
- error = e
- assert error is not None
- earg = 'Allowed ENV_TYPE values are [\'mainnet\', \'testnet\', \'qanet\', \'devnet\']. Actual: ""' # noqa
- assert error.args[0] == earg
diff --git a/tests/conftest.py b/tests/conftest.py
index 824ba93d..68ffb959 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -16,10 +16,11 @@
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
-""" SKALE config test """
+"""SKALE config test"""
import json
import os
+import tempfile
import pathlib
import shutil
@@ -36,7 +37,7 @@
NGINX_CONTAINER_NAME,
REMOVED_CONTAINERS_FOLDER_PATH,
STATIC_PARAMS_FILEPATH,
- SCHAIN_NODE_DATA_PATH
+ SCHAIN_NODE_DATA_PATH,
)
from node_cli.configs.node_options import NODE_OPTIONS_FILEPATH
from node_cli.configs.ssl import SSL_FOLDER_PATH
@@ -76,20 +77,6 @@
iptables-persistant: 1.1.3
lvm2: 1.1.1
-testnet:
- server:
- cpu_total: 4
- cpu_physical: 4
- memory: 32
- swap: 16
- disk: 200000000000
-
- packages:
- docker: 1.1.3
- docker-compose: 1.1.3
- iptables-persistant: 1.1.3
- lvm2: 1.1.1
-
qanet:
server:
cpu_total: 4
@@ -126,11 +113,7 @@
@pytest.fixture
def net_params_file():
with open(STATIC_PARAMS_FILEPATH, 'w') as f:
- yaml.dump(
- yaml.load(TEST_ENV_PARAMS, Loader=yaml.Loader),
- stream=f,
- Dumper=yaml.Dumper
- )
+ yaml.dump(yaml.load(TEST_ENV_PARAMS, Loader=yaml.Loader), stream=f, Dumper=yaml.Dumper)
yield STATIC_PARAMS_FILEPATH
os.remove(STATIC_PARAMS_FILEPATH)
@@ -165,12 +148,7 @@ def dclient():
def simple_image(dclient):
name = 'simple-image'
try:
- dclient.images.build(
- tag=name,
- rm=True,
- nocache=True,
- path='tests/simple_container'
- )
+ dclient.images.build(tag=name, rm=True, nocache=True, path='tests/simple_container')
yield name
finally:
try:
@@ -184,9 +162,7 @@ def simple_image(dclient):
def docker_hc(dclient):
dclient = docker.from_env()
return dclient.api.create_host_config(
- log_config=docker.types.LogConfig(
- type=docker.types.LogConfig.types.JSON
- )
+ log_config=docker.types.LogConfig(type=docker.types.LogConfig.types.JSON)
)
@@ -240,13 +216,7 @@ def nginx_container(dutils, ssl_folder):
'nginx:1.20.2',
name=NGINX_CONTAINER_NAME,
detach=True,
- volumes={
- ssl_folder: {
- 'bind': '/ssl',
- 'mode': 'ro',
- 'propagation': 'slave'
- }
- }
+ volumes={ssl_folder: {'bind': '/ssl', 'mode': 'ro', 'propagation': 'slave'}},
)
yield c
finally:
@@ -321,3 +291,64 @@ def tmp_sync_datadir():
yield TEST_SCHAINS_MNT_DIR_SYNC
finally:
shutil.rmtree(TEST_SCHAINS_MNT_DIR_SYNC)
+
+
+@pytest.fixture
+def valid_env_params():
+ """
+ Return a dictionary of environment parameters that mimics the contents of test-env.
+ """
+ return {
+ 'ENDPOINT': 'http://localhost:8545',
+        'IMA_ENDPOINT': 'http://127.0.0.1',
+ 'DB_USER': 'user',
+ 'DB_PASSWORD': 'pass',
+ 'DB_PORT': '3307',
+ 'CONTAINER_CONFIGS_STREAM': 'master',
+ 'FILEBEAT_HOST': '127.0.0.1:3010',
+ 'SGX_SERVER_URL': 'http://127.0.0.1',
+ 'DISK_MOUNTPOINT': '/dev/sss',
+ 'DOCKER_LVMPY_STREAM': 'master',
+ 'ENV_TYPE': 'devnet',
+ 'SCHAIN_NAME': 'test',
+ 'ENFORCE_BTRFS': 'False',
+ 'MANAGER_CONTRACTS': 'test-manager',
+ 'IMA_CONTRACTS': 'test-ima',
+ }
+
+
+@pytest.fixture
+def valid_env_file(valid_env_params):
+ """
+ Create a temporary .env file whose contents mimic test-env.
+
+ This file is created using the key/value pairs from valid_env_params,
+ one per line in the form KEY=VALUE.
+ """
+ with tempfile.NamedTemporaryFile(mode='w', delete=False) as f:
+ for key, value in valid_env_params.items():
+ f.write(f'{key}={value}\n')
+ file_name = f.name
+ yield file_name
+ os.unlink(file_name)
+
+
+@pytest.fixture
+def mock_chain_response():
+ """Return a fake RPC response for chain ID 1."""
+ return {
+ 'jsonrpc': '2.0',
+ 'id': 1,
+ 'result': '0x1', # Represents chain ID 1
+ }
+
+
+@pytest.fixture
+def mock_networks_metadata():
+ """Return fake network metadata that includes chain ID 1."""
+ return {
+ 'networks': [
+ {'chainId': 1, 'name': 'Mainnet', 'path': 'mainnet'},
+ {'chainId': 2, 'name': 'Testnet', 'path': 'testnet'},
+ ]
+ }
diff --git a/tests/core/core_node_test.py b/tests/core/core_node_test.py
index f79c6fa3..0257b9cd 100644
--- a/tests/core/core_node_test.py
+++ b/tests/core/core_node_test.py
@@ -142,14 +142,16 @@ def test_init_node(no_resource_file): # todo: write new init node test
resp_mock = response_mock(requests.codes.created)
assert not os.path.isfile(RESOURCE_ALLOCATION_FILEPATH)
env_filepath = './tests/test-env'
- with mock.patch('subprocess.run', new=subprocess_run_mock), mock.patch(
- 'node_cli.core.resources.get_disk_size', return_value=BIG_DISK_SIZE
- ), mock.patch('node_cli.core.host.prepare_host'), mock.patch(
- 'node_cli.core.host.init_data_dir'
- ), mock.patch('node_cli.operations.base.configure_nftables'), mock.patch(
- 'node_cli.core.node.init_op'
- ), mock.patch('node_cli.core.node.is_base_containers_alive', return_value=True), mock.patch(
- 'node_cli.utils.helper.post_request', resp_mock
+ with (
+ mock.patch('subprocess.run', new=subprocess_run_mock),
+ mock.patch('node_cli.core.resources.get_disk_size', return_value=BIG_DISK_SIZE),
+ mock.patch('node_cli.core.host.prepare_host'),
+ mock.patch('node_cli.core.host.init_data_dir'),
+ mock.patch('node_cli.operations.base.configure_nftables'),
+ mock.patch('node_cli.core.node.init_op'),
+ mock.patch('node_cli.core.node.is_base_containers_alive', return_value=True),
+ mock.patch('node_cli.utils.helper.post_request', resp_mock),
+ mock.patch('node_cli.configs.env.validate_params', lambda params: None),
):
init(env_filepath)
assert os.path.isfile(RESOURCE_ALLOCATION_FILEPATH)
@@ -159,23 +161,26 @@ def test_update_node(mocked_g_config, resource_file):
env_filepath = './tests/test-env'
resp_mock = response_mock(requests.codes.created)
os.makedirs(NODE_DATA_PATH, exist_ok=True)
- with mock.patch('subprocess.run', new=subprocess_run_mock), mock.patch(
- 'node_cli.core.node.update_op'
- ), mock.patch('node_cli.core.node.get_flask_secret_key'), mock.patch(
- 'node_cli.core.node.save_env_params'
- ), mock.patch('node_cli.operations.base.configure_nftables'), mock.patch(
- 'node_cli.core.host.prepare_host'
- ), mock.patch('node_cli.core.node.is_base_containers_alive', return_value=True), mock.patch(
- 'node_cli.utils.helper.post_request', resp_mock
- ), mock.patch('node_cli.core.resources.get_disk_size', return_value=BIG_DISK_SIZE), mock.patch(
- 'node_cli.core.host.init_data_dir'
- ), mock.patch(
- 'node_cli.core.node.get_meta_info',
- return_value=CliMeta(
- version='2.6.0', config_stream='3.0.2'
- )
+ with (
+ mock.patch('subprocess.run', new=subprocess_run_mock),
+ mock.patch('node_cli.core.node.update_op'),
+ mock.patch('node_cli.core.node.get_flask_secret_key'),
+ mock.patch('node_cli.core.node.save_env_params'),
+ mock.patch('node_cli.operations.base.configure_nftables'),
+ mock.patch('node_cli.core.host.prepare_host'),
+ mock.patch('node_cli.core.node.is_base_containers_alive', return_value=True),
+ mock.patch('node_cli.utils.helper.post_request', resp_mock),
+ mock.patch('node_cli.core.resources.get_disk_size', return_value=BIG_DISK_SIZE),
+ mock.patch('node_cli.core.host.init_data_dir'),
+ mock.patch(
+ 'node_cli.core.node.get_meta_info',
+ return_value=CliMeta(version='2.6.0', config_stream='3.0.2'),
+ ),
+ mock.patch('node_cli.configs.env.validate_params', lambda params: None),
):
- with mock.patch( 'node_cli.utils.helper.requests.get', return_value=safe_update_api_response()): # noqa
+ with mock.patch(
+ 'node_cli.utils.helper.requests.get', return_value=safe_update_api_response()
+        ):
result = update(env_filepath, pull_config_for_schain=None)
assert result is None
@@ -210,7 +215,10 @@ def test_is_update_safe():
def test_repair_sync(tmp_sync_datadir, mocked_g_config, resource_file):
- with mock.patch('node_cli.core.schains.rm_btrfs_subvolume'), \
- mock.patch('node_cli.utils.docker_utils.stop_container'), \
- mock.patch('node_cli.utils.docker_utils.start_container'):
+ with (
+ mock.patch('node_cli.core.schains.rm_btrfs_subvolume'),
+ mock.patch('node_cli.utils.docker_utils.stop_container'),
+ mock.patch('node_cli.utils.docker_utils.start_container'),
+ mock.patch('node_cli.configs.env.validate_params', lambda params: None),
+ ):
repair_sync(archive=True, historic_state=True, snapshot_from='127.0.0.1')
diff --git a/tests/test-env b/tests/test-env
index eb598381..7698a8b8 100644
--- a/tests/test-env
+++ b/tests/test-env
@@ -1,16 +1,15 @@
-ENDPOINT=127.0.0.1
-IMA_ENDPOINT=127.0.01
+ENDPOINT=http://localhost:8545
+IMA_ENDPOINT=http://127.0.0.1
DB_USER=user
DB_PASSWORD=pass
DB_PORT=3307
CONTAINER_CONFIGS_STREAM='master'
-MANAGER_CONTRACTS_ABI_URL=http://127.0.0.1
-IMA_CONTRACTS_ABI_URL=http:/127.0.0.1
FILEBEAT_HOST=127.0.0.1:3010
-MANAGER_CONTRACTS_ABI_URL=http://127.0.0.1
SGX_SERVER_URL=http://127.0.0.1
DISK_MOUNTPOINT=/dev/sss
DOCKER_LVMPY_STREAM='master'
ENV_TYPE='devnet'
SCHAIN_NAME='test'
-ENFORCE_BTRFS=False
\ No newline at end of file
+ENFORCE_BTRFS=False
+MANAGER_CONTRACTS='test-manager'
+IMA_CONTRACTS='test-ima'