From 5f33b1082774ce3126c66704bd6fed4b8197bef3 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sara=20Hincapi=C3=A9=20M?= <43832784+shincap8@users.noreply.github.com>
Date: Thu, 6 Feb 2025 17:05:06 -0500
Subject: [PATCH] Feature - New Metrics (#335)

* Update model methods to run background task when creating the endpoint and creating score endpoint for runpod to use

* Handle error from Runpod, URL inference from yaml file

* Update backend/app/api/endpoints/base/score.py

Co-authored-by: Rafael Mosquera

* Rename method

* rename endpoint

* Print background tasks and correct typo

* Correct typos and correct schemas

* model uid

* remove c

* New metrics for MLSuperb challenge

* Correct pretty name

* replace standard accuracy for standard LID Accuracy, and dialect accuracy for dialect LID accuracy

* change metric name to show in front end

---------

Co-authored-by: Rafael Mosquera
---
 api/evaluation/metrics/metrics.py                          | 2 +-
 .../services/builder_and_evaluation/eval_utils/metrics.py  | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/api/evaluation/metrics/metrics.py b/api/evaluation/metrics/metrics.py
index 0f72ba1c..35ae8a2c 100644
--- a/api/evaluation/metrics/metrics.py
+++ b/api/evaluation/metrics/metrics.py
@@ -175,7 +175,7 @@ def get_STD_CER_meta(task=None):
 def get_CER_15_WORSE_meta(task=None):
     return {
         "unit": "%",
-        "pretty_name": "CER 15 WORSE",
+        "pretty_name": "CER 15 WORST",
         "utility_direction": -1,
         "offset": 0,
     }
diff --git a/backend/app/domain/services/builder_and_evaluation/eval_utils/metrics.py b/backend/app/domain/services/builder_and_evaluation/eval_utils/metrics.py
index 30b93d30..8225dbe6 100644
--- a/backend/app/domain/services/builder_and_evaluation/eval_utils/metrics.py
+++ b/backend/app/domain/services/builder_and_evaluation/eval_utils/metrics.py
@@ -205,7 +205,7 @@ def get_STD_CER_meta(task=None):
 def get_CER_15_WORSE_meta(task=None):
     return {
         "unit": "%",
-        "pretty_name": "CER_15_WORSE",
+        "pretty_name": "CER_15_WORST",
         "utility_direction": -1,
         "offset": 0,
     }
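
Note: the patch only changes the "pretty_name" field of the CER-15-worst metric metadata; the rest of the dict ("unit", "utility_direction", "offset") is unchanged. The following is a minimal sketch, not part of the patch, of how such metadata might be consumed downstream. The sort_models_by_metric helper is hypothetical and only illustrates the role of utility_direction.

def get_CER_15_WORSE_meta(task=None):
    # Metric metadata as it reads after this patch is applied.
    return {
        "unit": "%",
        "pretty_name": "CER 15 WORST",
        "utility_direction": -1,  # lower character error rate is better
        "offset": 0,
    }


def sort_models_by_metric(scores, meta):
    # Hypothetical helper: rank (model, score) pairs so the best model comes
    # first, using utility_direction to decide whether higher or lower wins.
    reverse = meta["utility_direction"] > 0
    return sorted(scores, key=lambda item: item[1], reverse=reverse)


if __name__ == "__main__":
    meta = get_CER_15_WORSE_meta()
    ranked = sort_models_by_metric([("model_a", 12.3), ("model_b", 8.7)], meta)
    print(meta["pretty_name"], ranked)
    # CER 15 WORST [('model_b', 8.7), ('model_a', 12.3)]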