This repository has been archived by the owner on Sep 1, 2024. It is now read-only.

calc and save throughput metrics and delete multiple test suites api #114

Merged · 8 commits · Feb 14, 2024
18 changes: 17 additions & 1 deletion api/src/api/tests_api.py
@@ -63,7 +63,23 @@ def get_test_run(test_suite_id, test_run_id):
    else:
        return jsonify({'error': 'Not Found', 'message':'Test run with id: ' + str(test_run_id) +' and test suite id: '+ str(test_suite_id) +' not found'}), HTTP_STATUS_NOT_FOUND

@api.route('/test_suites/delete', methods=['POST'])
@cross_origin(origins=['*'], supports_credentials=True)
def delete_test_suites():
    try:
        data = request.get_json()
        __validate_delete_test_suites(data)
        test_suites_service.delete_test_suites(data['ids'])
        return jsonify(), HTTP_STATUS_NO_CONTENT
    except (ApiException) as e:
        return jsonify({'error': e.error, 'message': e.message}), e.status_code


def __validate_update_test_suite(data):
    if not data or 'name' not in data or 'description' not in data:
        raise ApiException('Missing properties, required properties: name, description', 'Invalid data provided', HTTP_STATUS_BAD_REQUEST)


def __validate_delete_test_suites(data):
    if not data or 'ids' not in data:
        raise ApiException('Missing properties, required property: ids', 'Invalid data provided', HTTP_STATUS_BAD_REQUEST)
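For illustration, a minimal client-side sketch of calling the new bulk-delete endpoint; the base URL, port, and route prefix are assumptions, not taken from this PR:

import requests

# Hypothetical deployment address; adjust to wherever the qujata API runs.
BASE_URL = "http://localhost:3020"

# The body must carry an 'ids' list; a missing or malformed body is rejected
# by __validate_delete_test_suites with an HTTP 400 ('Invalid data provided').
response = requests.post(BASE_URL + "/test_suites/delete", json={"ids": [1, 2, 3]})
print(response.status_code)  # 204 (HTTP_STATUS_NO_CONTENT) on success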
23 changes: 11 additions & 12 deletions api/src/services/analyze_service.py
@@ -1,5 +1,3 @@
import os
import uuid
import time
import requests
import logging
@@ -8,7 +6,7 @@
from datetime import datetime, timedelta
from flask import jsonify, current_app
import src.services.test_suites_service as test_suites_service
import src.services.metrics_service as metrics_service
import src.utils.metrics_collection_manager as metrics_collection_manager
from src.enums.status import Status

# constants
@@ -43,12 +41,12 @@ def analyze(data):

def __create_test_run(algorithm, iterations, message_size, test_suite_id):
    start_time = datetime.now()
    metrics_service.start_collecting()
    status, status_message = __run(algorithm, iterations, message_size)
    metrics_service.stop_collecting()
    metrics_collection_manager.start_collecting()
    status, status_message, requests_size = __run(algorithm, iterations, message_size)
    metrics_collection_manager.stop_collecting()
    end_time = datetime.now()
    test_suites_service.create_test_run(start_time, end_time, algorithm, iterations, message_size, test_suite_id, status, status_message, *metrics_service.get_metrics())
    test_suites_service.create_test_run(start_time, end_time, algorithm, iterations, message_size, test_suite_id, status, status_message, requests_size, *metrics_collection_manager.get_metrics())


def __run(algorithm, iterations, message_size):
    logging.debug('Running test for algorithm: %s ', algorithm)
@@ -61,10 +59,11 @@ def __run(algorithm, iterations, message_size):
    response = requests.post(current_app.configurations.curl_url + "/curl", headers=headers, json=payload, timeout=int(current_app.configurations.request_timeout))

    return __validate_response(response)


def __validate_response(response):
    if response.status_code < 200 or response.status_code > 299:
        return Status.FAILED, json.dumps(response.json())
    data = response.json()
    if response.status_code < 200 or response.status_code > 299:
        return Status.FAILED, json.dumps(data), 0
    else:
        return Status.SUCCESS, ""
        return Status.SUCCESS, "", data.get('totalRequestSize')
75 changes: 55 additions & 20 deletions api/src/services/metrics_service.py
@@ -1,22 +1,57 @@
from src.utils.metrics_collector import MetricsCollector
import logging

client_collector = MetricsCollector("qujata-curl")
server_collector = MetricsCollector("qujata-nginx")

# TODO: add lock validation
def start_collecting():
    client_collector.start()
    server_collector.start()

def stop_collecting():
    client_collector.stop()
    server_collector.stop()
    # print collectors results
    logging.info(client_collector.to_pretty_table())
    logging.info(server_collector.to_pretty_table())

def get_metrics():
    client_data = client_collector.get_data()
    server_data = server_collector.get_data()
    return client_data, server_data
from flask import current_app
from src.models.test_run_metric import TestRunMetric
from src.enums.metric import Metric
import pytz
from dateutil import parser


def create(test_run, client_metrics, server_metrics, requests_size, start_time, end_time):
    __save_resources_metrics(Metric.CLIENT_AVERAGE_CPU, Metric.CLIENT_AVERAGE_MEMORY, client_metrics, test_run)
    __save_resources_metrics(Metric.SERVER_AVERAGE_CPU, Metric.SERVER_AVERAGE_MEMORY, server_metrics, test_run)
    __save_throughput_metrics(Metric.MESSAGES_THROUGHPUT_PER_SECOND, Metric.BYTES_THROUGHPUT_PER_SECOND, start_time,
                              end_time, requests_size, test_run)


def __save_resources_metrics(cpu_metric_name, memory_metric_name, metrics, test_run):
    cpu, memory = __calculate_average(metrics, test_run.start_time)
    __save_metric_to_db(test_run, cpu_metric_name, cpu)
    __save_metric_to_db(test_run, memory_metric_name, memory)


def __save_throughput_metrics(requests_metric_name, bytes_metric_name, start_time, end_time, requests_size, test_run):
    requests_throughput, bytes_throughput = __calculate_throughput(test_run.iterations, start_time, end_time, requests_size)
    __save_metric_to_db(test_run, requests_metric_name, requests_throughput)
    __save_metric_to_db(test_run, bytes_metric_name, bytes_throughput)


def __save_metric_to_db(test_run, metric_name, metric_value):
    test_run_metric = TestRunMetric(
        test_run_id=test_run.id,
        metric_name=metric_name,
        value=metric_value
    )
    current_app.database_manager.create(test_run_metric)


def __calculate_average(metrics, start_time):
    cpu, memory = 0, 0
    counter = 0
    for ts, value in metrics.items():
        if parser.parse(ts) >= start_time.astimezone(pytz.UTC):
            cpu += value["cpu"]
            memory += value["memory"]
            counter += 1

    if counter == 0:
        return 0, 0
    return round(cpu/counter, 2), round(memory/counter, 0)


def __calculate_throughput(iterations, start_time, end_time, requests_size):
    seconds = (end_time - start_time).total_seconds()
    request_throughput = 0 if seconds == 0 else iterations / seconds
    bytes_throughput = 0 if seconds == 0 or requests_size is None else int(requests_size) / seconds
    return round(request_throughput, 0), round(bytes_throughput, 0)
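To make the throughput calculation concrete, a small worked example mirroring __calculate_throughput; the numbers are invented for illustration:

from datetime import datetime, timedelta

# Suppose a run of 1,000 iterations transferred 2,048,000 bytes in 4 seconds.
start_time = datetime(2024, 2, 14, 12, 0, 0)
end_time = start_time + timedelta(seconds=4)
iterations, requests_size = 1000, 2_048_000

seconds = (end_time - start_time).total_seconds()          # 4.0
request_throughput = round(iterations / seconds, 0)        # 250.0 messages per second
bytes_throughput = round(int(requests_size) / seconds, 0)  # 512000.0 bytes per second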

51 changes: 7 additions & 44 deletions api/src/services/test_suites_service.py
@@ -3,14 +3,10 @@
from src.models.env_info import EnvInfo
from src.models.test_suite import TestSuite
from src.models.test_run import TestRun
from src.models.test_run_metric import TestRunMetric
from src.enums.metric import Metric
from src.exceptions.exceptions import ApiException, NotFoundException
import src.utils.test_suite_serializer as test_suite_serializer
import pytz
from dateutil import parser
import logging

import src.services.metrics_service as metrics_service
from src.enums.status import Status

# constants
HTTP_STATUS_UNPROCESSABLE_ENTITY = 422
@@ -36,7 +32,7 @@ def create_test_suite(data):
    current_app.database_manager.create(test_suite)
    return test_suite

def create_test_run(start_time, end_time, algorithm, iterations, message_size, test_suite_id, status, status_message, client_metrics, server_metrics):
def create_test_run(start_time, end_time, algorithm, iterations, message_size, test_suite_id, status, status_message, requests_size, client_metrics, server_metrics):
    test_run = TestRun(
        start_time=start_time,
        end_time=end_time,
@@ -48,7 +44,8 @@ def create_test_run(start_time, end_time, algorithm, iterations, message_size, t
        test_suite_id=test_suite_id
    )
    current_app.database_manager.create(test_run)
    __create_test_run_metrics(test_run, client_metrics, server_metrics)
    if status == Status.SUCCESS:
        metrics_service.create(test_run, client_metrics, server_metrics, requests_size, start_time, end_time)
    return test_run

def update_test_suite(test_suite):
@@ -87,39 +84,5 @@ def delete_test_suite(test_suite_id):
        raise NotFoundException('Test suite with id: ' + str(test_suite_id) +' not found', 'Not Found')
    current_app.database_manager.delete(test_suite)


def __create_test_run_metrics(test_run, client_metrics, server_metrics):
    __save_metrics(Metric.CLIENT_AVERAGE_CPU, Metric.CLIENT_AVERAGE_MEMORY, client_metrics, test_run)
    __save_metrics(Metric.SERVER_AVERAGE_CPU, Metric.SERVER_AVERAGE_MEMORY, server_metrics, test_run)


def __save_metrics(cpu_metric_name, memory_metric_name, metrics, test_run):
    cpu, memory = __calculate_average(metrics, test_run.start_time)
    __save_metric_to_db(test_run, cpu_metric_name, cpu, TYPE_CPU)
    __save_metric_to_db(test_run, memory_metric_name, memory, TYPE_MEMORY)


def __calculate_average(metrics, start_time):
    cpu, memory = 0, 0
    counter = 0
    for ts, value in metrics.items():
        if parser.parse(ts) >= start_time.astimezone(pytz.UTC):
            cpu += value["cpu"]
            memory += value["memory"]
            counter += 1

    if counter == 0:
        return 0, 0
    return cpu/counter, memory/counter

def __save_metric_to_db(test_run, metric_name, metric_value, metric_type):
    if metric_type == TYPE_CPU:
        metric_value = round(metric_value, 2)
    elif metric_type == TYPE_MEMORY:
        metric_value = round(metric_value, 0)
    test_run_metric = TestRunMetric(
        test_run_id=test_run.id,
        metric_name=metric_name,
        value=metric_value
    )
    current_app.database_manager.create(test_run_metric)

def delete_test_suites(test_suite_ids):
    current_app.database_manager.delete_by_ids(TestSuite, test_suite_ids)
4 changes: 4 additions & 0 deletions api/src/utils/database_manager.py
@@ -15,6 +15,10 @@ def delete(self, instance):
        self.__db.session.delete(instance)
        self.__db.session.commit()

    def delete_by_ids(self, model, ids):
        self.__db.session.query(model).filter(model.id.in_(ids)).delete(synchronize_session='fetch')
        self.__db.session.commit()

    def update(self, instance):
        self.__db.session.commit()

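For context, delete_by_ids issues a single query-level DELETE. A self-contained sketch of the same SQLAlchemy pattern; the in-memory engine and model below are illustrative stand-ins, not code from this repository:

from sqlalchemy import create_engine, Column, Integer, String
from sqlalchemy.orm import declarative_base, Session

Base = declarative_base()

class TestSuite(Base):  # stand-in for src.models.test_suite.TestSuite
    __tablename__ = "test_suites"
    id = Column(Integer, primary_key=True)
    name = Column(String)

engine = create_engine("sqlite:///:memory:")
Base.metadata.create_all(engine)

with Session(engine) as session:
    session.add_all([TestSuite(id=i, name="suite-" + str(i)) for i in range(1, 4)])
    session.commit()
    # Query-level bulk delete, the same shape as DatabaseManager.delete_by_ids;
    # synchronize_session='fetch' keeps already-loaded objects consistent with the DB.
    session.query(TestSuite).filter(TestSuite.id.in_([1, 3])).delete(synchronize_session='fetch')
    session.commit()
    print([suite.id for suite in session.query(TestSuite).all()])  # [2]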
22 changes: 22 additions & 0 deletions api/src/utils/metrics_collection_manager.py
@@ -0,0 +1,22 @@
from src.utils.metrics_collector import MetricsCollector
import logging

client_collector = MetricsCollector("qujata-curl")
server_collector = MetricsCollector("qujata-nginx")

# TODO: add lock validation
def start_collecting():
    client_collector.start()
    server_collector.start()

def stop_collecting():
    client_collector.stop()
    server_collector.stop()
    # print collectors results
    logging.info(server_collector.to_pretty_table())
    logging.info(client_collector.to_pretty_table())

def get_metrics():
    client_data = client_collector.get_data()
    server_data = server_collector.get_data()
    return client_data, server_data
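A sketch of the collection lifecycle this module provides, mirroring how analyze_service now drives it; the sleep stands in for the real test traffic:

import time
import src.utils.metrics_collection_manager as metrics_collection_manager

metrics_collection_manager.start_collecting()
time.sleep(1)  # placeholder for the actual workload under test
metrics_collection_manager.stop_collecting()

# Two dicts of samples keyed by timestamp, one for qujata-curl and one for
# qujata-nginx, each entry carrying the 'cpu' and 'memory' values that
# metrics_service.__calculate_average consumes.
client_metrics, server_metrics = metrics_collection_manager.get_metrics()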
32 changes: 22 additions & 10 deletions api/src/utils/test_suite_serializer.py
@@ -16,34 +16,38 @@ def serialize(test_suite):

def __get_environment_info(env_info):
    return {
        "resourceName": env_info.resource_name,
        "operatingSystem": env_info.operating_system,
        "resource_name": env_info.resource_name,
        "operating_system": env_info.operating_system,
        "cpu": env_info.cpu,
        "cpuArchitecture": env_info.cpu_architecture,
        "cpuCores": env_info.cpu_cores,
        "cpuClockSpeed": env_info.clock_speed,
        "nodeSize": env_info.node_size
        "cpu_architecture": env_info.cpu_architecture,
        "cpu_cores": env_info.cpu_cores,
        "cpu_clock_speed": env_info.clock_speed,
        "node_size": env_info.node_size
    }



def __get_test_runs_metrics(test_runs):
    test_runs_list = []
    for test_run in test_runs:
        cpu_avg, memory_avg = __calculate_cpu_memory_avg(test_run.test_run_metrics)
        metrics = test_run.test_run_metrics
        cpu_avg, memory_avg = __calculate_cpu_memory_avg(metrics)
        request_throughput, bytes_throughput = get_throughput_metrics(metrics)
        results = {
            "id": test_run.id,
            "algorithm": test_run.algorithm,
            "iterations": test_run.iterations,
            "message_size": test_run.message_size,
            "results": {
                "averageCPU": round(cpu_avg, 2),
                "averageMemory": int(memory_avg),
                "average_cpu": round(cpu_avg, 2),
                "average_memory": int(memory_avg),
                "request_throughput": int(request_throughput),
                "bytes_throughput": int(bytes_throughput)
            }
        }
        test_runs_list.append(results)
    return test_runs_list


def __calculate_cpu_memory_avg(test_run_metrics):
    cpu_avg, memory_avg = 0.00, 0

@@ -54,3 +58,11 @@ def __calculate_cpu_memory_avg(test_run_metrics):
            memory_avg += metric.value

    return cpu_avg, memory_avg


def get_throughput_metrics(test_run_metrics):
    return __find_metric(test_run_metrics, Metric.MESSAGES_THROUGHPUT_PER_SECOND), __find_metric(test_run_metrics, Metric.BYTES_THROUGHPUT_PER_SECOND)


def __find_metric(test_run_metrics, metric_name):
    return next((metric.value for metric in test_run_metrics if metric.metric_name == metric_name), 0)
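To show the effect of the serializer changes, a hypothetical test-run entry as the serializer would now build it, with snake_case keys and the two new throughput fields; all values are invented:

example_test_run = {
    "id": 42,
    "algorithm": "kyber512",
    "iterations": 1000,
    "message_size": 1024,
    "results": {
        "average_cpu": 17.53,
        "average_memory": 268435456,
        "request_throughput": 250,
        "bytes_throughput": 512000,
    },
}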