diff --git a/api/src/api/tests_api.py b/api/src/api/tests_api.py
index 6e035636..f7bed0aa 100644
--- a/api/src/api/tests_api.py
+++ b/api/src/api/tests_api.py
@@ -63,7 +63,23 @@ def get_test_run(test_suite_id, test_run_id):
     else:
         return jsonify({'error': 'Not Found', 'message':'Test run with id: ' + str(test_run_id) +' and test suite id: '+ str(test_suite_id) +' not found'}), HTTP_STATUS_NOT_FOUND
 
+@api.route('/test_suites/delete', methods=['POST'])
+@cross_origin(origins=['*'], supports_credentials=True)
+def delete_test_suites():
+    try:
+        data = request.get_json()
+        __validate_delete_test_suites(data)
+        test_suites_service.delete_test_suites(data['ids'])
+        return jsonify(), HTTP_STATUS_NO_CONTENT
+    except (ApiException) as e:
+        return jsonify({'error': e.error, 'message': e.message}), e.status_code
+
 def __validate_update_test_suite(data):
     if not data or 'name' not in data or 'description' not in data:
-        raise ApiException('Missing properties, required properties: name, description', 'Invalid data provided', HTTP_STATUS_BAD_REQUEST)
\ No newline at end of file
+        raise ApiException('Missing properties, required properties: name, description', 'Invalid data provided', HTTP_STATUS_BAD_REQUEST)
+
+
+def __validate_delete_test_suites(data):
+    if not data or 'ids' not in data:
+        raise ApiException('Missing properties, required property: ids', 'Invalid data provided', HTTP_STATUS_BAD_REQUEST)
\ No newline at end of file
diff --git a/api/src/services/analyze_service.py b/api/src/services/analyze_service.py
index ef07c43d..6f504bb2 100644
--- a/api/src/services/analyze_service.py
+++ b/api/src/services/analyze_service.py
@@ -1,5 +1,3 @@
-import os
-import uuid
 import time
 import requests
 import logging
@@ -8,7 +6,7 @@ from datetime import datetime, timedelta
 from flask import jsonify, current_app
 
 import src.services.test_suites_service as test_suites_service
-import src.services.metrics_service as metrics_service
+import src.utils.metrics_collection_manager as metrics_collection_manager
 from src.enums.status import Status
 
 # constants
@@ -43,12 +41,12 @@ def analyze(data):
 
 def __create_test_run(algorithm, iterations, message_size, test_suite_id):
     start_time = datetime.now()
-    metrics_service.start_collecting()
-    status, status_message = __run(algorithm, iterations, message_size)
-    metrics_service.stop_collecting()
+    metrics_collection_manager.start_collecting()
+    status, status_message, requests_size = __run(algorithm, iterations, message_size)
+    metrics_collection_manager.stop_collecting()
     end_time = datetime.now()
-    test_suites_service.create_test_run(start_time, end_time, algorithm, iterations, message_size, test_suite_id, status, status_message, *metrics_service.get_metrics())
-
+    test_suites_service.create_test_run(start_time, end_time, algorithm, iterations, message_size, test_suite_id, status, status_message, requests_size, *metrics_collection_manager.get_metrics())
+
 
 def __run(algorithm, iterations, message_size):
     logging.debug('Running test for algorithm: %s ', algorithm)
@@ -61,10 +59,11 @@ def __run(algorithm, iterations, message_size):
     response = requests.post(current_app.configurations.curl_url + "/curl", headers=headers, json=payload, timeout=int(current_app.configurations.request_timeout))
     return __validate_response(response)
-
+
 def __validate_response(response):
-    if response.status_code < 200 or response.status_code > 299:
-        return Status.FAILED, json.dumps(response.json())
+    data = response.json()
+    if response.status_code < 200 or response.status_code > 299:
+        return Status.FAILED, json.dumps(data), 0
     else:
-        return Status.SUCCESS, ""
\ No newline at end of file
+        return Status.SUCCESS, "", data.get('totalRequestSize')
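For reference, the new bulk-delete route takes a JSON body with an `ids` list and answers 204 No Content on success (400 when `ids` is missing). A minimal client call, with the host and port assumed purely for illustration, would look roughly like:

    import requests

    # host/port are placeholders; point this at your qujata-api instance
    response = requests.post('http://localhost:3020/api/test_suites/delete', json={'ids': [1, 2, 3]})
    assert response.status_code == 204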
diff --git a/api/src/services/metrics_service.py b/api/src/services/metrics_service.py
index 37f0a8d8..7ca167bf 100644
--- a/api/src/services/metrics_service.py
+++ b/api/src/services/metrics_service.py
@@ -1,22 +1,57 @@
-from src.utils.metrics_collector import MetricsCollector
 import logging
-client_collector = MetricsCollector("qujata-curl")
-server_collector = MetricsCollector("qujata-nginx")
-
-# TODO: add lock validation
-def start_collecting():
-    client_collector.start()
-    server_collector.start()
-
-def stop_collecting():
-    client_collector.stop()
-    server_collector.stop()
-    # print collectors results
-    logging.info(client_collector.to_pretty_table())
-    logging.info(server_collector.to_pretty_table())
-
-def get_metrics():
-    client_data = client_collector.get_data()
-    server_data = server_collector.get_data()
-    return client_data, server_data
+from flask import current_app
+from src.models.test_run_metric import TestRunMetric
+from src.enums.metric import Metric
+import pytz
+from dateutil import parser
+
+
+def create(test_run, client_metrics, server_metrics, requests_size, start_time, end_time):
+    __save_resources_metrics(Metric.CLIENT_AVERAGE_CPU, Metric.CLIENT_AVERAGE_MEMORY, client_metrics, test_run)
+    __save_resources_metrics(Metric.SERVER_AVERAGE_CPU, Metric.SERVER_AVERAGE_MEMORY, server_metrics, test_run)
+    __save_throughput_metrics(Metric.MESSAGES_THROUGHPUT_PER_SECOND, Metric.BYTES_THROUGHPUT_PER_SECOND, start_time,
+                              end_time, requests_size, test_run)
+
+
+def __save_resources_metrics(cpu_metric_name, memory_metric_name, metrics, test_run):
+    cpu, memory = __calculate_average(metrics, test_run.start_time)
+    __save_metric_to_db(test_run, cpu_metric_name, cpu)
+    __save_metric_to_db(test_run, memory_metric_name, memory)
+
+
+def __save_throughput_metrics(requests_metric_name, bytes_metric_name, start_time, end_time, requests_size, test_run):
+    requests_throughput, bytes_throughput = __calculate_throughput(test_run.iterations, start_time, end_time, requests_size)
+    __save_metric_to_db(test_run, requests_metric_name, requests_throughput)
+    __save_metric_to_db(test_run, bytes_metric_name, bytes_throughput)
+
+
+def __save_metric_to_db(test_run, metric_name, metric_value):
+    test_run_metric = TestRunMetric(
+        test_run_id=test_run.id,
+        metric_name=metric_name,
+        value=metric_value
+    )
+    current_app.database_manager.create(test_run_metric)
+
+
+def __calculate_average(metrics, start_time):
+    cpu, memory = 0, 0
+    counter = 0
+    for ts, value in metrics.items():
+        if parser.parse(ts) >= start_time.astimezone(pytz.UTC):
+            cpu += value["cpu"]
+            memory += value["memory"]
+            counter += 1
+
+    if counter == 0:
+        return 0, 0
+    return round(cpu/counter, 2), round(memory/counter, 0)
+
+
+def __calculate_throughput(iterations, start_time, end_time, requests_size):
+    seconds = (end_time - start_time).total_seconds()
+    request_throughput = 0 if seconds == 0 else iterations / seconds
+    bytes_throughput = 0 if seconds == 0 or requests_size is None else int(requests_size) / seconds
+    return round(request_throughput, 0), round(bytes_throughput, 0)
+
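The stored throughput values are plain per-second rates over the run window: iterations divided by elapsed seconds for messages, and the reported request size divided by elapsed seconds for bytes, each rounded to a whole number. A quick check with the figures used by the analyze test later in this diff:

    from datetime import datetime

    start = datetime(2024, 1, 1, 10, 0, 0)
    end = datetime(2024, 1, 1, 10, 2, 0)      # a 120-second test run
    seconds = (end - start).total_seconds()
    print(round(1000 / seconds, 0))           # 8.0 messages per second
    print(round(10000 / seconds, 0))          # 83.0 bytes per second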
diff --git a/api/src/services/test_suites_service.py b/api/src/services/test_suites_service.py
index c691f8e6..24043b4e 100644
--- a/api/src/services/test_suites_service.py
+++ b/api/src/services/test_suites_service.py
@@ -3,14 +3,10 @@ from src.models.env_info import EnvInfo
 from src.models.test_suite import TestSuite
 from src.models.test_run import TestRun
-from src.models.test_run_metric import TestRunMetric
-from src.enums.metric import Metric
 from src.exceptions.exceptions import ApiException, NotFoundException
 import src.utils.test_suite_serializer as test_suite_serializer
-import pytz
-from dateutil import parser
-import logging
-
+import src.services.metrics_service as metrics_service
+from src.enums.status import Status
 
 # constants
 HTTP_STATUS_UNPROCESSABLE_ENTITY = 422
@@ -36,7 +32,7 @@ def create_test_suite(data):
     current_app.database_manager.create(test_suite)
     return test_suite
 
-def create_test_run(start_time, end_time, algorithm, iterations, message_size, test_suite_id, status, status_message, client_metrics, server_metrics):
+def create_test_run(start_time, end_time, algorithm, iterations, message_size, test_suite_id, status, status_message, requests_size, client_metrics, server_metrics):
     test_run = TestRun(
         start_time=start_time,
         end_time=end_time,
@@ -48,7 +44,8 @@ def create_test_run(start_time, end_time, algorithm, iterations, message_size, t
         test_suite_id=test_suite_id
     )
     current_app.database_manager.create(test_run)
-    __create_test_run_metrics(test_run, client_metrics, server_metrics)
+    if status == Status.SUCCESS:
+        metrics_service.create(test_run, client_metrics, server_metrics, requests_size, start_time, end_time)
     return test_run
 
 def update_test_suite(test_suite):
@@ -87,39 +84,5 @@ def delete_test_suite(test_suite_id):
         raise NotFoundException('Test suite with id: ' + str(test_suite_id) +' not found', 'Not Found')
     current_app.database_manager.delete(test_suite)
 
-
-def __create_test_run_metrics(test_run, client_metrics, server_metrics):
-    __save_metrics(Metric.CLIENT_AVERAGE_CPU, Metric.CLIENT_AVERAGE_MEMORY, client_metrics, test_run)
-    __save_metrics(Metric.SERVER_AVERAGE_CPU, Metric.SERVER_AVERAGE_MEMORY, server_metrics, test_run)
-
-
-def __save_metrics(cpu_metric_name, memory_metric_name, metrics, test_run):
-    cpu, memory = __calculate_average(metrics, test_run.start_time)
-    __save_metric_to_db(test_run, cpu_metric_name, cpu, TYPE_CPU)
-    __save_metric_to_db(test_run, memory_metric_name, memory, TYPE_MEMORY)
-
-
-def __calculate_average(metrics, start_time):
-    cpu, memory = 0, 0
-    counter = 0
-    for ts, value in metrics.items():
-        if parser.parse(ts) >= start_time.astimezone(pytz.UTC):
-            cpu += value["cpu"]
-            memory += value["memory"]
-            counter += 1
-
-    if counter == 0:
-        return 0, 0
-    return cpu/counter, memory/counter
-
-def __save_metric_to_db(test_run, metric_name, metric_value, metric_type):
-    if metric_type == TYPE_CPU:
-        metric_value = round(metric_value, 2)
-    elif metric_type == TYPE_MEMORY:
-        metric_value = round(metric_value, 0)
-    test_run_metric = TestRunMetric(
-        test_run_id=test_run.id,
-        metric_name=metric_name,
-        value=metric_value
-    )
-    current_app.database_manager.create(test_run_metric)
+def delete_test_suites(test_suite_ids):
+    current_app.database_manager.delete_by_ids(TestSuite, test_suite_ids)
diff --git a/api/src/utils/database_manager.py b/api/src/utils/database_manager.py
index 5ad3a409..85dfb8a4 100644
--- a/api/src/utils/database_manager.py
+++ b/api/src/utils/database_manager.py
@@ -15,6 +15,10 @@ def delete(self, instance):
         self.__db.session.delete(instance)
         self.__db.session.commit()
 
+    def delete_by_ids(self, model, ids):
+        self.__db.session.query(model).filter(model.id.in_(ids)).delete(synchronize_session='fetch')
+        self.__db.session.commit()
+
     def update(self, instance):
         self.__db.session.commit()
 
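`delete_by_ids` issues a single bulk DELETE ... WHERE id IN (...) through the SQLAlchemy session instead of loading and deleting rows one by one; `synchronize_session='fetch'` keeps any objects already loaded into the session consistent with that bulk statement. Roughly equivalent, assuming the usual Flask-SQLAlchemy `db.session`:

    # sketch of what DatabaseManager.delete_by_ids does under the hood
    db.session.query(TestSuite).filter(TestSuite.id.in_([1, 2, 3])).delete(synchronize_session='fetch')
    db.session.commit()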
diff --git a/api/src/utils/metrics_collection_manager.py b/api/src/utils/metrics_collection_manager.py
new file mode 100644
index 00000000..4e5a9a13
--- /dev/null
+++ b/api/src/utils/metrics_collection_manager.py
@@ -0,0 +1,22 @@
+from src.utils.metrics_collector import MetricsCollector
+import logging
+
+client_collector = MetricsCollector("qujata-curl")
+server_collector = MetricsCollector("qujata-nginx")
+
+# TODO: add lock validation
+def start_collecting():
+    client_collector.start()
+    server_collector.start()
+
+def stop_collecting():
+    client_collector.stop()
+    server_collector.stop()
+    # print collectors results
+    logging.info(server_collector.to_pretty_table())
+    logging.info(client_collector.to_pretty_table())
+
+def get_metrics():
+    client_data = client_collector.get_data()
+    server_data = server_collector.get_data()
+    return client_data, server_data
diff --git a/api/src/utils/test_suite_serializer.py b/api/src/utils/test_suite_serializer.py
index 1d13f018..7168049a 100644
--- a/api/src/utils/test_suite_serializer.py
+++ b/api/src/utils/test_suite_serializer.py
@@ -16,34 +16,38 @@ def serialize(test_suite):
 
 def __get_environment_info(env_info):
     return {
-        "resourceName": env_info.resource_name,
-        "operatingSystem": env_info.operating_system,
+        "resource_name": env_info.resource_name,
+        "operating_system": env_info.operating_system,
         "cpu": env_info.cpu,
-        "cpuArchitecture": env_info.cpu_architecture,
-        "cpuCores": env_info.cpu_cores,
-        "cpuClockSpeed": env_info.clock_speed,
-        "nodeSize": env_info.node_size
+        "cpu_architecture": env_info.cpu_architecture,
+        "cpu_cores": env_info.cpu_cores,
+        "cpu_clock_speed": env_info.clock_speed,
+        "node_size": env_info.node_size
     }
 
-
 def __get_test_runs_metrics(test_runs):
     test_runs_list = []
     for test_run in test_runs:
-        cpu_avg, memory_avg = __calculate_cpu_memory_avg(test_run.test_run_metrics)
+        metrics = test_run.test_run_metrics
+        cpu_avg, memory_avg = __calculate_cpu_memory_avg(metrics)
+        request_throughput, bytes_throughput = get_throughput_metrics(metrics)
         results = {
             "id": test_run.id,
             "algorithm": test_run.algorithm,
             "iterations": test_run.iterations,
             "message_size": test_run.message_size,
             "results": {
-                "averageCPU": round(cpu_avg, 2),
-                "averageMemory": int(memory_avg),
+                "average_cpu": round(cpu_avg, 2),
+                "average_memory": int(memory_avg),
+                "request_throughput": int(request_throughput),
+                "bytes_throughput": int(bytes_throughput)
             }
         }
         test_runs_list.append(results)
     return test_runs_list
+
+
 def __calculate_cpu_memory_avg(test_run_metrics):
     cpu_avg, memory_avg = 0.00, 0
@@ -54,3 +58,11 @@ def __calculate_cpu_memory_avg(test_run_metrics):
             memory_avg += metric.value
 
     return cpu_avg, memory_avg
+
+
+def get_throughput_metrics(test_run_metrics):
+    return __find_metric(test_run_metrics, Metric.MESSAGES_THROUGHPUT_PER_SECOND), __find_metric(test_run_metrics, Metric.BYTES_THROUGHPUT_PER_SECOND)
+
+
+def __find_metric(test_run_metrics, metric_name):
+    return next((metric.value for metric in test_run_metrics if metric.metric_name == metric_name), 0)
\ No newline at end of file
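After these serializer changes every key is snake_case and each test run's `results` block carries the two throughput figures, falling back to 0 when a metric row is absent (the default in `__find_metric`). For a run holding the metric values used in the tests below, the block would look roughly like:

    {
        "average_cpu": 9.0,
        "average_memory": 14,
        "request_throughput": 50,
        "bytes_throughput": 4500
    }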
diff --git a/api/tests/test_analyze_api.py b/api/tests/test_analyze_api.py
index 3a9b325d..b9b3657a 100644
--- a/api/tests/test_analyze_api.py
+++ b/api/tests/test_analyze_api.py
@@ -22,9 +22,11 @@ client_metrics = {str(datetime.now() + timedelta(seconds=30)) + "123Z":{"cpu":3.6, "memory":254},
                   str(datetime.now() + timedelta(seconds=36))+ "123Z":{"cpu":3.8, "memory":234}}
 server_metrics = {str(datetime.now() + timedelta(seconds=30))+ "123Z":{"cpu":2.3, "memory":154},
                   str(datetime.now() + timedelta(seconds=36))+ "123Z":{"cpu":2.7, "memory":156}}
 metrics = [client_metrics, server_metrics]
-@patch('src.services.metrics_service.start_collecting', return_value=None)
-@patch('src.services.metrics_service.stop_collecting', return_value=None)
-@patch('src.services.metrics_service.get_metrics', return_value=metrics)
+
+
+@patch('src.utils.metrics_collection_manager.start_collecting', return_value=None)
+@patch('src.utils.metrics_collection_manager.stop_collecting', return_value=None)
+@patch('src.utils.metrics_collection_manager.get_metrics', return_value=metrics)
 class TestAnalyzeAPI(unittest.TestCase):
     def setUp(self):
         self.app = Flask(__name__)
@@ -33,10 +35,10 @@ def setUp(self):
         load_config(self.app)
 
         self.app.database_manager = Mock(spec=DatabaseManager)
-
-    def test_analyze(self, mock_start_collecting, mock_stop_collecting, mock_get_metrics):
+    @patch('src.services.metrics_service.parser.parse', side_effect=lambda x: datetime.fromisoformat(x))
+    def test_analyze(self, mock_parser, mock_start_collecting, mock_stop_collecting, mock_get_metrics):
         input_data = {
-            "algorithms":["kyber512"],
+            "algorithms": ["kyber512"],
             "iterationsCount": [1000, 2000],
             "experimentName": "name",
             "description": "name",
@@ -47,30 +49,47 @@ def test_analyze(self, mock_start_collecting, mock_stop_collecting, mock_get_met
             with patch(GET_REQUEST) as mock_get:
                 mock_get.return_value.status_code = 200
                 mock_get.return_value.json.return_value = {}
-                mock_post.return_value = MagicMock(status_code=200, json=lambda: {'result': 'success'})
+                mock_post.return_value = MagicMock(status_code=200, json=lambda: {'result': 'success', 'totalRequestSize': 10000})
+                with patch('src.services.analyze_service.datetime') as mock_datetime:
+                    mock_datetime.now.side_effect = [
+                        datetime(2024, 1, 1, 10, 0, 0),  # test suite start time
+                        datetime(2024, 1, 1, 10, 0, 0),  # first test run start time
+                        datetime(2024, 1, 1, 10, 2, 0),  # first test run end time
+                        datetime(2024, 1, 1, 10, 2, 0),  # second test run start time
+                        datetime(2024, 1, 1, 10, 3, 0),  # second test run end time
+                        datetime(2024, 1, 1, 10, 3, 0)   # test suite end time
+                    ]
 
-                response = self.client.post(PATH,
-                              data=json.dumps(input_data),
-                              content_type=CONTENT_TYPE)
+                    response = self.client.post(PATH,
+                                  data=json.dumps(input_data),
+                                  content_type=CONTENT_TYPE)
 
-
-        self.assertEqual(self.app.database_manager.create.call_count, 11)  # 1 for the test suite, and 2 for test runs and 4*2(8) for test run metrics
-        db_call = self.app.database_manager.create.call_args_list
-        self.assertEqual(db_call[2].args[0].metric_name, Metric.CLIENT_AVERAGE_CPU)
-        self.assertEqual(db_call[2].args[0].value, 3.7)
-        self.assertEqual(db_call[3].args[0].metric_name, Metric.CLIENT_AVERAGE_MEMORY)
-        self.assertEqual(db_call[3].args[0].value, 244.0)
-        self.assertEqual(db_call[4].args[0].metric_name, Metric.SERVER_AVERAGE_CPU)
-        self.assertEqual(db_call[4].args[0].value, 2.5)
-        self.assertEqual(db_call[5].args[0].metric_name, Metric.SERVER_AVERAGE_MEMORY)
-        self.assertEqual(db_call[5].args[0].value, 155.0)
-
-        self.assertEqual(response.status_code, 200)
-        # Check the response content
-        response_data = json.loads(response.data)
-        self.assertIn('test_suite_id', response_data)
-        self.assertEqual(mock_start_collecting.call_count, 2)
-        self.assertEqual(mock_stop_collecting.call_count, 2)
+
+                    self.assertEqual(self.app.database_manager.create.call_count, 15)  # 1 for the test suite, and 2 for test runs and 6*2(12) for test run metrics
+                    db_call = self.app.database_manager.create.call_args_list
+                    self.assertEqual(db_call[2].args[0].metric_name, Metric.CLIENT_AVERAGE_CPU)
+                    self.assertEqual(db_call[2].args[0].value, 3.7)
+                    self.assertEqual(db_call[3].args[0].metric_name, Metric.CLIENT_AVERAGE_MEMORY)
+                    self.assertEqual(db_call[3].args[0].value, 244.0)
+                    self.assertEqual(db_call[4].args[0].metric_name, Metric.SERVER_AVERAGE_CPU)
+                    self.assertEqual(db_call[4].args[0].value, 2.5)
+                    self.assertEqual(db_call[5].args[0].metric_name, Metric.SERVER_AVERAGE_MEMORY)
+                    self.assertEqual(db_call[5].args[0].value, 155.0)
+                    self.assertEqual(db_call[6].args[0].metric_name, Metric.MESSAGES_THROUGHPUT_PER_SECOND)
+                    self.assertEqual(db_call[6].args[0].value, 8.0)
+                    self.assertEqual(db_call[7].args[0].metric_name, Metric.BYTES_THROUGHPUT_PER_SECOND)
+                    self.assertEqual(db_call[7].args[0].value, 83.0)
+                    self.assertEqual(db_call[13].args[0].metric_name, Metric.MESSAGES_THROUGHPUT_PER_SECOND)
+                    self.assertEqual(db_call[13].args[0].value, 33.0)
+                    self.assertEqual(db_call[14].args[0].metric_name, Metric.BYTES_THROUGHPUT_PER_SECOND)
+                    self.assertEqual(db_call[14].args[0].value, 167.0)
+
+                    self.assertEqual(response.status_code, 200)
+                    # Check the response content
+                    response_data = json.loads(response.data)
+                    self.assertIn('test_suite_id', response_data)
+                    self.assertEqual(mock_start_collecting.call_count, 2)
+                    self.assertEqual(mock_stop_collecting.call_count, 2)
@@ -176,6 +195,7 @@ def test_analyze_with_curl_failure(self, mock_start_collecting, mock_stop_collec
                                         data=json.dumps(input_data),
                                         content_type=CONTENT_TYPE)
         self.assertEqual(response.status_code, 200)
+        self.assertEqual(self.app.database_manager.create.call_count, 2)  # don't save metrics when the curl request failed
         actual_test_run = self.app.database_manager.create.call_args_list[1].args
         self.assertEqual(actual_test_run[0].status, Status.FAILED)
         self.assertEqual(actual_test_run[0].status_message, '{"result": "failed"}')
diff --git a/api/tests/test_metrics_service.py b/api/tests/test_metrics_collection_manager.py
similarity index 87%
rename from api/tests/test_metrics_service.py
rename to api/tests/test_metrics_collection_manager.py
index 734f4e38..bf18297c 100644
--- a/api/tests/test_metrics_service.py
+++ b/api/tests/test_metrics_collection_manager.py
@@ -1,16 +1,13 @@
 import unittest
-import logging
 import time
-from datetime import datetime, timezone
 from unittest.mock import patch, Mock
 import requests
 from flask import Flask
 from config.settings import load_config
-import src.services.metrics_service as metrics_service
+import src.utils.metrics_collection_manager as metrics_collection_manager
 import src.services.k8s_service as k8s_service
 import src.services.cadvisor_service as cadvisor_service
-from src.models.test_run import TestRun
 from src.utils.database_manager import DatabaseManager
 from kubernetes.client import CoreV1Api, V1PodList, V1Pod, V1PodStatus, V1ObjectMeta, V1ContainerStatus
 
@@ -40,13 +37,13 @@ def test_collecting_docker(self):
         with patch(POST_REQUEST) as mock_post:
             mock_post.return_value.status_code = 200
             mock_post.return_value.json.return_value = docker_response
-            metrics_service.start_collecting()
-            self.assertTrue(metrics_service.client_collector._MetricsCollector__locked)
-            self.assertTrue(metrics_service.server_collector._MetricsCollector__locked)
+            metrics_collection_manager.start_collecting()
+            self.assertTrue(metrics_collection_manager.client_collector._MetricsCollector__locked)
+            self.assertTrue(metrics_collection_manager.server_collector._MetricsCollector__locked)
             time.sleep(5)
-            metrics_service.stop_collecting()
+            metrics_collection_manager.stop_collecting()
             self.assertEqual(mock_post.call_count, 10) # 10: 5 for curl, and 5 for nginx (2 requests per sec)
-            actual_curl, actual_nginx = metrics_service.get_metrics()
+            actual_curl, actual_nginx = metrics_collection_manager.get_metrics()
             self.assertEqual(actual_curl, expected_curl_metrics_collector_data)
             self.assertEqual(actual_nginx, expected_nginx_metrics_collector_data)
 
@@ -86,13 +83,13 @@ def test_collecting_k8s_with_cri_containerd(self):
                 mock_list_pod.return_value = mock_pod_list
                 mock_post.return_value.status_code = 200
                 mock_post.return_value.json.return_value = k8s_response
-                metrics_service.start_collecting()
-                self.assertTrue(metrics_service.client_collector._MetricsCollector__locked)
-                self.assertTrue(metrics_service.server_collector._MetricsCollector__locked)
+                metrics_collection_manager.start_collecting()
+                self.assertTrue(metrics_collection_manager.client_collector._MetricsCollector__locked)
+                self.assertTrue(metrics_collection_manager.server_collector._MetricsCollector__locked)
                 time.sleep(5)
-                metrics_service.stop_collecting()
+                metrics_collection_manager.stop_collecting()
                 self.assertEqual(mock_post.call_count, 10) # 10: 5 for curl, and 5 for nginx (2 requests per sec)
-                actual_curl, actual_nginx = metrics_service.get_metrics()
+                actual_curl, actual_nginx = metrics_collection_manager.get_metrics()
                 self.assertEqual(actual_curl, expected_curl_metrics_collector_data)
                 self.assertEqual(actual_nginx, expected_nginx_metrics_collector_data)
 
@@ -132,13 +129,13 @@ def test_collecting_k8s_with_cri_docker(self):
                 mock_list_pod.return_value = mock_pod_list
                 mock_post.return_value.status_code = 200
                 mock_post.return_value.json.return_value = k8s_response
-                metrics_service.start_collecting()
-                self.assertTrue(metrics_service.client_collector._MetricsCollector__locked)
-                self.assertTrue(metrics_service.server_collector._MetricsCollector__locked)
+                metrics_collection_manager.start_collecting()
+                self.assertTrue(metrics_collection_manager.client_collector._MetricsCollector__locked)
+                self.assertTrue(metrics_collection_manager.server_collector._MetricsCollector__locked)
                 time.sleep(5)
-                metrics_service.stop_collecting()
+                metrics_collection_manager.stop_collecting()
                 self.assertEqual(mock_post.call_count, 10) # 10: 5 for curl, and 5 for nginx (2 requests per sec)
-                actual_curl, actual_nginx = metrics_service.get_metrics()
+                actual_curl, actual_nginx = metrics_collection_manager.get_metrics()
                 self.assertEqual(actual_curl, expected_curl_metrics_collector_data)
                 self.assertEqual(actual_nginx, expected_nginx_metrics_collector_data)
 
@@ -179,8 +176,8 @@ def test_collecting_k8s_with_unsupported_cri(self, mock_log):
                 mock_list_pod.return_value = mock_pod_list
                 mock_post.return_value.status_code = 200
                 mock_post.return_value.json.return_value = k8s_response
-                metrics_service.start_collecting()
-                self.assertFalse(metrics_service.client_collector._MetricsCollector__locked)
+                metrics_collection_manager.start_collecting()
+                self.assertFalse(metrics_collection_manager.client_collector._MetricsCollector__locked)
                 self.assertEqual(str(mock_log.call_args_list[0]), "call('[MetricCollector] Failed to collect metrics with error: %s', RuntimeError('cri: unsupported-cri not supported'), exc_info=True)")
 
@@ -190,12 +187,12 @@ def test_collecting_when_locked(self, mock_log):
         with patch(POST_REQUEST) as mock_post:
             mock_post.return_value.status_code = 200
             mock_post.return_value.json.return_value = docker_response
-            metrics_service.client_collector._MetricsCollector__locked = True
-            metrics_service.server_collector._MetricsCollector__locked = True
-            metrics_service.start_collecting()
+            metrics_collection_manager.client_collector._MetricsCollector__locked = True
+            metrics_collection_manager.server_collector._MetricsCollector__locked = True
+            metrics_collection_manager.start_collecting()
             self.assertEqual(mock_post.call_count, 0)
-            metrics_service.client_collector._MetricsCollector__locked = False
-            metrics_service.client_collector._MetricsCollector__locked = False
+            metrics_collection_manager.client_collector._MetricsCollector__locked = False
+            metrics_collection_manager.client_collector._MetricsCollector__locked = False
             self.assertEqual(str(mock_log.call_args_list[0]), "call('[MetricCollector] collector is already running', exc_info=True)")
 
@@ -206,7 +203,7 @@ def test_collecting_when_init_k8s_failed(self, mock_log):
         with patch(LIST_NAMESPACED_POD, side_effect=requests.exceptions.RequestException("Test exception")):
             with patch(POST_REQUEST) as mock_post:
                 k8s_service.init_cluster()
-                metrics_service.start_collecting()
+                metrics_collection_manager.start_collecting()
                 self.assertEqual(mock_post.call_count, 0)
                 self.assertEqual(str(mock_log.call_args_list[0]), "call('[MetricCollector] Failed to collect metrics with error: %s', RequestException('Test exception'), exc_info=True)")
 
@@ -226,7 +223,7 @@ def test_collecting_k8s_when_pod_items_is_empty(self, mock_log):
                 mock_list_pod.return_value = mock_pod_list
                 mock_post.return_value.status_code = 200
                 mock_post.return_value.json.return_value = k8s_response
-                metrics_service.start_collecting()
+                metrics_collection_manager.start_collecting()
                 self.assertEqual(mock_post.call_count, 0)
                 self.assertEqual(str(mock_log.call_args_list[0]), "call('[MetricCollector] Failed to collect metrics with error: %s', RuntimeError('qujata-curl pod not found'), exc_info=True)")
                 self.assertEqual(str(mock_log.call_args_list[1]), "call('[MetricCollector] Failed to collect metrics with error: %s', RuntimeError('qujata-nginx pod not found'), exc_info=True)")
 
@@ -285,7 +282,7 @@ def test_collecting_k8s_when_cadvisor_pod_not_found(self, mock_log):
             with patch(POST_REQUEST) as mock_post:
                 mock_post.return_value.status_code = 200
                 mock_post.return_value.json.return_value = k8s_response
-                metrics_service.start_collecting()
+                metrics_collection_manager.start_collecting()
                 self.assertEqual(mock_post.call_count, 0)
                 self.assertEqual(str(mock_log.call_args_list[0]), "call('[MetricCollector] Failed to collect metrics with error: %s', RuntimeError('qujata-cadvisor pod not found with host_ip: 192.168.1.2'), exc_info=True)")
                 self.assertEqual(str(mock_log.call_args_list[1]), "call('[MetricCollector] Failed to collect metrics with error: %s', RuntimeError('qujata-cadvisor pod not found with host_ip: 192.168.1.2'), exc_info=True)")
diff --git a/api/tests/test_tests_api.py b/api/tests/test_tests_api.py
index 8e52b5bb..3b11103d 100644
--- a/api/tests/test_tests_api.py
+++ b/api/tests/test_tests_api.py
@@ -13,12 +13,15 @@ from src.models.test_run_metric import TestRunMetric
 from src.models.env_info import EnvInfo
 from src.enums.metric import Metric
+from src.exceptions.exceptions import ApiException
+
 CONTENT_TYPE_APPLICATION_JSON = 'application/json'
 TEST_SUITE_NAME = "new name"
 TEST_SUITE_NOT_FOUND_MSG = 'Test suite with id: 1 not found'
 NOT_FOUND = 'Not Found'
 TEST_SUITES_GET_URL = '/api/test_suites/1'
+TEST_SUITES_DELETE_URL = '/api/test_suites/delete'
 
 class TestTestsAPI(unittest.TestCase):
     def setUp(self):
@@ -54,10 +57,12 @@ def __test_run(self):
         )
 
     def __test_run_metrics(self):
-        return [TestRunMetric(metric_name=Metric.CLIENT_AVERAGE_CPU,value=3),
-                TestRunMetric(metric_name=Metric.CLIENT_AVERAGE_MEMORY,value=5),
-                TestRunMetric(metric_name=Metric.SERVER_AVERAGE_CPU,value=6),
-                TestRunMetric(metric_name=Metric.SERVER_AVERAGE_MEMORY,value=9)]
+        return [TestRunMetric(metric_name=Metric.CLIENT_AVERAGE_CPU, value=3),
+                TestRunMetric(metric_name=Metric.CLIENT_AVERAGE_MEMORY, value=5),
+                TestRunMetric(metric_name=Metric.SERVER_AVERAGE_CPU, value=6),
+                TestRunMetric(metric_name=Metric.SERVER_AVERAGE_MEMORY, value=9),
+                TestRunMetric(metric_name=Metric.MESSAGES_THROUGHPUT_PER_SECOND, value=50),
+                TestRunMetric(metric_name=Metric.BYTES_THROUGHPUT_PER_SECOND, value=4500)]
 
     def test_get_test_suites(self):
         self.app.database_manager.list.return_value = [self.__test_suite()]
@@ -70,7 +75,7 @@ def test_get_test_suite(self):
         self.app.database_manager.get_by_id.return_value = test_suite
         response = self.client.get(TEST_SUITES_GET_URL)
         result = json.loads(response.data)
-        expected = {'code_release': '1.1.0', 'description': 'description', 'end_time': None, 'environment_info': {'cpu': None, 'cpuArchitecture': None, 'cpuClockSpeed': None, 'cpuCores': None, 'nodeSize': None, 'operatingSystem': None, 'resourceName': None}, 'id': None, 'name': 'name', 'start_time': None, 'test_runs': [{'algorithm': None, 'id': 1, 'iterations': None, 'message_size': None, 'results': {'averageCPU': 9.0, 'averageMemory': 14}}]}
+        expected = {'code_release': '1.1.0', 'description': 'description', 'end_time': None, 'environment_info': {'cpu': None, 'cpu_architecture': None, 'cpu_clock_speed': None, 'cpu_cores': None, 'node_size': None, 'operating_system': None, 'resource_name': None}, 'id': None, 'name': 'name', 'start_time': None, 'test_runs': [{'algorithm': None, 'id': 1, 'iterations': None, 'message_size': None, 'results': {'average_cpu': 9.0, 'average_memory': 14, 'request_throughput': 50, 'bytes_throughput': 4500}}]}
         self.assertEqual(result, expected)
 
     def test_get_test_suite_return_not_found(self):
@@ -143,4 +148,28 @@ def test_get_test_run_return_not_found(self):
         self.assertEqual(result, {'error': NOT_FOUND, 'message': 'Test run with id: 1 and test suite id: 1 not found'})
         self.assertEqual(response.status_code, 404)
 
+    def test_delete_test_suites(self):
+        input_data = {"ids": [1, 2, 3]}
+        self.app.database_manager.delete_by_ids.return_value = None
+        response = self.client.post(TEST_SUITES_DELETE_URL, data=json.dumps(input_data), content_type=CONTENT_TYPE_APPLICATION_JSON)
+        self.assertEqual(response.status_code, 204)
+        self.app.database_manager.delete_by_ids.assert_called_once_with(TestSuite, [1, 2, 3])
+
+    def test_delete_test_suites_missing_ids(self):
+        input_data = {}  # Missing 'ids' key
+        response = self.client.post(TEST_SUITES_DELETE_URL, data=json.dumps(input_data), content_type=CONTENT_TYPE_APPLICATION_JSON)
+        result = json.loads(response.data)
+        self.assertEqual(response.status_code, 400)
+        self.assertEqual(result, {'error': 'Invalid data provided', 'message': 'Missing properties, required property: ids'})
+        self.assertEqual(self.app.database_manager.delete_by_ids.call_count, 0)
+
+    def test_delete_test_suites_api_exception(self):
+        input_data = {"ids": [1, 2, 3]}
+        error_message = 'Some error message'
+        self.app.database_manager.delete_by_ids.side_effect = ApiException(error_message, 'Some error', status_code=500)
+        response = self.client.post(TEST_SUITES_DELETE_URL, data=json.dumps(input_data), content_type=CONTENT_TYPE_APPLICATION_JSON)
+        result = json.loads(response.data)
+        self.assertEqual(response.status_code, 500)
+        self.assertEqual(result, {'error': 'Some error', 'message': 'Some error message'})
+        self.app.database_manager.delete_by_ids.assert_called_once_with(TestSuite, [1, 2, 3])
diff --git a/curl/scripts/run-curl-loop.sh b/curl/scripts/run-curl-loop.sh
index 250c74c2..7dd2c67b 100755
--- a/curl/scripts/run-curl-loop.sh
+++ b/curl/scripts/run-curl-loop.sh
@@ -6,16 +6,34 @@
 nginx_port="$2"
 iteration_count="$3"
 algorithm="$4"
 message_size="$5"
-num_processes=$(($(getconf _NPROCESSORS_ONLN) * 2))
+url="https://${nginx_host}:${nginx_port}"
+header="Content-Type: text/plain"
 
 # Generates a random message with the specified size and saves it to a temporary file.
 # This is useful for cases where the payload is too large to be sent in the curl command as text.
 generate_message() {
   local size="$1"
   tr -dc '[:print:]' < /dev/urandom | head -c "$size" > /tmp/message.txt
 }
-
-# Generate the message
+# create the tmp file with generated message
 generate_message "$message_size"
 
-seq "$iteration_count" | xargs -P "$num_processes" -n 1 -I % curl "https://${nginx_host}:${nginx_port}" -k --curves "${algorithm}" -XPOST -d "@/tmp/message.txt" -H "Content-Type: text/plain" -o /dev/null
\ No newline at end of file
+# Execute the first curl command and capture output (request size) and errors
+response=$(curl -sS -D - "${url}" -k --curves "${algorithm}" -XPOST -d "@/tmp/message.txt" -H "${header}" -o /dev/null 2>/dev/null)
+if [ $? -ne 0 ]; then
+  echo "Error: Failed to execute curl command."
+  exit 1
+fi
+request_size=$(echo "$response" | awk '/Total-Request-Size/ {print $2}')
+if [ -z "$request_size" ]; then
+  request_size=0
+fi
+
+num_processes=$(($(getconf _NPROCESSORS_ONLN) * 2))
+
+# Execute the subsequent curl commands, continue on failure
+seq $((iteration_count - 1)) | xargs -P $num_processes -n 1 -I % curl ${url} -k --curves ${algorithm} -XPOST -d "@/tmp/message.txt" -H "${header}" -o /dev/null 2>/dev/null
+
+wait
+
+echo -n "$((request_size * iteration_count))"
\ No newline at end of file
diff --git a/curl/src/curl/curl.controller.spec.ts b/curl/src/curl/curl.controller.spec.ts
index 3f5b4f41..d1a9de90 100644
--- a/curl/src/curl/curl.controller.spec.ts
+++ b/curl/src/curl/curl.controller.spec.ts
@@ -3,6 +3,7 @@ import { CurlController } from './curl.controller';
 import { CurlService } from './curl.service';
 import { CurlRequest } from '../dto/curl-request.dto';
 import { HttpException } from '@nestjs/common';
+import { CurlResponseDto } from "../dto/curl-response.dto";
 
 describe('CurlController', () => {
   let curlController: CurlController;
   let curlService: CurlService;
@@ -41,7 +42,8 @@ describe('CurlController', () => {
       iterationsCount: 500,
       messageSize: 10
     };
-    const expectedResult = undefined;
+    const expectedResult = new CurlResponseDto();
+    expectedResult.totalRequestSize = 123;
     jest.spyOn(curlService, 'run').mockResolvedValue(expectedResult);
     const result = await curlController.create(curlRequest);
     expect(result).toBe(expectedResult);
diff --git a/curl/src/curl/curl.controller.ts b/curl/src/curl/curl.controller.ts
index a71264a4..f35c1a1c 100644
--- a/curl/src/curl/curl.controller.ts
+++ b/curl/src/curl/curl.controller.ts
@@ -1,4 +1,4 @@
-import { Controller, Post, Body, HttpCode, HttpStatus, NotFoundException } from '@nestjs/common';
+import { Controller, Post, Body, HttpCode, HttpStatus } from '@nestjs/common';
 import { CurlService } from './curl.service';
 import { CurlRequest } from '../dto/curl-request.dto';
 
@@ -8,8 +8,10 @@ export class CurlController {
   @Post()
   @HttpCode(HttpStatus.CREATED)
-  async create(@Body() curlRequest: CurlRequest): Promise<void> {
+  async create(@Body() curlRequest: CurlRequest) {
     console.log('[CurlController:create] In Post func. Body: ', curlRequest);
-    return await this.curlService.run(curlRequest);
+    const curlInfo = await this.curlService.run(curlRequest);
+    console.log('[CurlController:create] In Post func. Response: ', curlInfo)
+    return curlInfo;
   }
 }
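With the controller now returning whatever CurlService.run resolves to, a successful POST to the curl service yields a small JSON body instead of an empty response, and the API side reads it in __validate_response. Roughly, with an illustrative value:

    # response.json() as seen by analyze_service after calling qujata-curl
    data = {'totalRequestSize': 10000}
    requests_size = data.get('totalRequestSize')  # passed through create_test_run to the throughput metrics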
diff --git a/curl/src/curl/curl.service.spec.ts b/curl/src/curl/curl.service.spec.ts
index a3830af1..01d4bc40 100644
--- a/curl/src/curl/curl.service.spec.ts
+++ b/curl/src/curl/curl.service.spec.ts
@@ -2,9 +2,9 @@ import { Test, TestingModule } from '@nestjs/testing';
 import { CurlService } from './curl.service';
 import { CurlRequest } from '../dto/curl-request.dto';
 import { ConfigModule, ConfigService } from '@nestjs/config';
-
 import { HttpException, HttpStatus } from '@nestjs/common';
 import * as shellJS from 'shelljs';
+
 jest.mock('shelljs', () => ({
   exec: jest.fn(),
 }));
@@ -120,14 +120,17 @@ describe('CurlService', () => {
   });
 
   describe('runCurls', () => {
-    it('should call execAsync with the correct command', async () => {
+    it('should call execAsync with the correct command and return a valid response', async () => {
       const iterationsCount = 1000;
       const algorithm = 'kyber512';
       const messageSize = 1024;
-      const execAsyncSpy = jest.spyOn(curlService, 'execAsync').mockResolvedValue(undefined);
-      await curlService['runCurls'](iterationsCount, algorithm, messageSize);
+      const expectedResult = '123';
+      const execAsyncSpy = jest.spyOn(curlService, 'execAsync').mockResolvedValue(expectedResult);
+      const result = await curlService['runCurls'](iterationsCount, algorithm, messageSize);
       const expectedCommand = curlService['format'](`./scripts/run-curl-loop.sh ${configService.get('nginx.host')} ${configService.get('nginx.port')} ${iterationsCount} ${algorithm} ${messageSize}`);
       expect(execAsyncSpy).toHaveBeenCalledWith(expectedCommand);
+      expect(result).toEqual({ totalRequestSize: parseInt(expectedResult) });
+
     });
     // Add more test cases for error handling in runCurls.
   });
diff --git a/curl/src/curl/curl.service.ts b/curl/src/curl/curl.service.ts
index cd29f2e1..a90d0bea 100644
--- a/curl/src/curl/curl.service.ts
+++ b/curl/src/curl/curl.service.ts
@@ -1,6 +1,7 @@
-import { Injectable, HttpException, HttpStatus } from '@nestjs/common';
+import { HttpException, HttpStatus, Injectable } from '@nestjs/common';
 import * as shellJS from 'shelljs';
 import { CurlRequest } from '../dto/curl-request.dto';
+import { CurlResponseDto } from '../dto/curl-response.dto';
 import { ConfigService } from '@nestjs/config';
 
 @Injectable()
@@ -16,10 +17,10 @@ export class CurlService {
     this.CURL_SCRIPT_PATH = "./scripts/run-curl-loop.sh"
   }
 
-  async run(curlRequest: CurlRequest): Promise<void> {
+  async run(curlRequest: CurlRequest): Promise<CurlResponseDto> {
     this.validate(curlRequest);
     try {
-      await this.runCurls(curlRequest.iterationsCount, curlRequest.algorithm, curlRequest.messageSize);
+      return await this.runCurls(curlRequest.iterationsCount, curlRequest.algorithm, curlRequest.messageSize);
     } catch (err) {
       this.processIsRunning = false;
       console.error('[CurlService:run] Error occurred: ', err);
@@ -38,15 +39,18 @@ export class CurlService {
     }
   }
 
-  private async runCurls(iterationsCount: number, algorithm: string, messageSize: number) {
+  private async runCurls(iterationsCount: number, algorithm: string, messageSize: number) : Promise<CurlResponseDto> {
     const curlCommand = this.format(`${this.CURL_SCRIPT_PATH} ${this.configService.get('nginx.host')} ${this.configService.get('nginx.port')} ${iterationsCount} ${algorithm} ${messageSize}`);
     this.processIsRunning = true;
-    await this.execAsync(curlCommand);
+    const result = await this.execAsync(curlCommand);
     console.log('[CurlService:run] Finished taking all curl samples');
     this.processIsRunning = false;
+    return {
+      totalRequestSize: parseInt(result)
+    };
   }
 
-  private execAsync(command): Promise<void> {
+  private execAsync(command): Promise<string> {
     return new Promise((resolve, reject) => {
       shellJS.exec(command, { async: true }, (code, stdout, stderr) => {
         if (code === 0) {
diff --git a/curl/src/dto/curl-response.dto.ts b/curl/src/dto/curl-response.dto.ts
new file mode 100644
index 00000000..966756b2
--- /dev/null
+++ b/curl/src/dto/curl-response.dto.ts
@@ -0,0 +1,3 @@
+export class CurlResponseDto {
+  totalRequestSize: number;
+}
diff --git a/nginx/app.js b/nginx/app.js
index c068a84a..9377c46e 100644
--- a/nginx/app.js
+++ b/nginx/app.js
@@ -3,10 +3,10 @@ const app = express();
 app.use(express.json());
 app.use(express.urlencoded({ extended: true }));
 app.get('/', function (req, res) {
-  res.send('Received POST request.\n');
+  res.send('Received GET request.\n');
 });
 app.post('/', function (req, res) {
-  res.send('Received GET request.');
+  res.send('Received POST request.');
 });
 app.listen(3000, function () {
   console.log('app listening on port 3000');
diff --git a/nginx/nginx.conf b/nginx/nginx.conf
index 59f33205..ddaa0f19 100644
--- a/nginx/nginx.conf
+++ b/nginx/nginx.conf
@@ -61,6 +61,7 @@ http {
         # }
 
         location / {
+            add_header Total-Request-Size $request_length;
             proxy_pass http://localhost:3000;
         }
     }
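Taken together, the request-size plumbing runs end to end: nginx exposes a Total-Request-Size response header built from $request_length, run-curl-loop.sh reads it from the first request and multiplies it by the iteration count, CurlService returns the product as totalRequestSize, and the API stores the derived rate as BYTES_THROUGHPUT_PER_SECOND. A back-of-the-envelope check with illustrative numbers:

    request_size = 10            # bytes reported by the Total-Request-Size header
    iterations = 1000
    duration_seconds = 120
    total = request_size * iterations           # 10000, what the script echoes on stdout
    print(round(total / duration_seconds, 0))   # 83.0, the BYTES_THROUGHPUT_PER_SECOND value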