Commit

Use Top Score instead of Max score
shincap8 committed Feb 11, 2025
1 parent a750095 commit ab165f7
Showing 3 changed files with 39 additions and 4 deletions.
29 changes: 26 additions & 3 deletions backend/app/domain/services/base/score.py

```diff
@@ -8,9 +8,13 @@
 import boto3
 import numpy as np
 import pandas as pd
+import yaml

 from app.domain.helpers.email import EmailHelper
 from app.domain.services.base.dataset import DatasetService
+from app.domain.services.builder_and_evaluation.eval_utils.metrics_dicts import (
+    meta_metrics_dict,
+)
 from app.infrastructure.repositories.dataset import DatasetRepository
 from app.infrastructure.repositories.model import ModelRepository
 from app.infrastructure.repositories.round import RoundRepository
@@ -307,11 +311,30 @@ def calculate_dynascore(
         return converted_data

     def get_maximun_principal_score_by_task(self, task_id: int) -> float:
+        yaml_file = self.task_repository.get_config_file_by_task_id(task_id)[0]
+        yaml_file = yaml.safe_load(yaml_file)
+        perf_metric = yaml_file.get("perf_metric", {})
+        if isinstance(perf_metric, list):
+            evaluation = yaml_file.get("evaluation", None)
+            main_metric = evaluation.get("main_perf_metric", None)
+        else:
+            main_metric = perf_metric.get("type", None)
+        if main_metric:
+            metadata = meta_metrics_dict.get(main_metric)
+            metadata = metadata(None)
+            direction = metadata.get("utility_direction", None)
+        else:
+            direction = 1
         scoring_datasets = self.dataset_service.get_scoring_datasets_by_task_id(task_id)
         scoring_datasets = [dataset["id"] for dataset in scoring_datasets]
-        scores = self.score_repository.get_maximun_principal_score_by_task(
-            task_id, scoring_datasets
-        )
+        if direction == -1:
+            scores = self.score_repository.get_minimum_main_score_by_task(
+                task_id, scoring_datasets
+            )
+        else:
+            scores = self.score_repository.get_maximun_principal_score_by_task(
+                task_id, scoring_datasets
+            )
         if scores:
             return scores
         else:
```
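The service now derives the ranking direction from the task config: it loads the task's YAML, resolves the main performance metric (either `perf_metric.type` or, for multi-metric tasks, `evaluation.main_perf_metric`), and reads that metric's `utility_direction` from `meta_metrics_dict`. When the direction is `-1` (lower is better, as with loss- or error-style metrics), the top score is the minimum average rather than the maximum. Below is a minimal, self-contained sketch of that selection logic; `stub_metrics`, `get_metric_direction`, and `best_score` are hypothetical stand-ins rather than Dynabench APIs, and only the control flow mirrors the diff.

```python
def get_metric_direction(config: dict, metrics_dict: dict) -> int:
    """Resolve the task's main metric and return its utility direction:
    1 means higher is better, -1 means lower is better."""
    perf_metric = config.get("perf_metric", {})
    if isinstance(perf_metric, list):
        # Multi-metric tasks name their main metric under "evaluation".
        main_metric = config.get("evaluation", {}).get("main_perf_metric")
    else:
        main_metric = perf_metric.get("type")
    if main_metric and main_metric in metrics_dict:
        # Entries are factories that return a metadata dict.
        metadata = metrics_dict[main_metric](None)
        return metadata.get("utility_direction", 1)
    return 1


def best_score(scores: list, direction: int) -> float:
    """The top score: max when higher is better, min otherwise."""
    return min(scores) if direction == -1 else max(scores)


# Hypothetical metric entries shaped like meta_metrics_dict factories.
stub_metrics = {
    "accuracy": lambda _: {"utility_direction": 1},
    "perplexity": lambda _: {"utility_direction": -1},
}

config = {"perf_metric": {"type": "perplexity"}}
direction = get_metric_direction(config, stub_metrics)
print(best_score([12.4, 9.8, 15.1], direction))  # -> 9.8
```

One design note: in the committed code, a metric missing from `meta_metrics_dict` would make `metadata(None)` raise, since `dict.get` returns `None`; the sketch above guards that lookup before calling the factory.
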
12 changes: 12 additions & 0 deletions backend/app/infrastructure/repositories/score.py

```diff
@@ -54,6 +54,18 @@ def get_maximun_principal_score_by_task(self, task_id: int, datasets: list):
             .first()
         )

+    def get_minimum_main_score_by_task(self, task_id: int, datasets: list):
+        return (
+            self.session.query(Model.name, func.avg(Score.perf).label("perf"))
+            .filter(Score.did.in_(datasets))
+            .filter(Score.mid == Model.id)
+            .filter(Model.tid == task_id)
+            .filter(Model.is_published)
+            .group_by(Model.id)
+            .order_by(func.avg(Score.perf).asc())
+            .first()
+        )
+
     def get_downstream_scores(self, dataset_id: int, model_id: int):
         return (
             self.session.query(Score)
```
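The new repository method is a mirror image of the existing `get_maximun_principal_score_by_task`, differing only in `.asc()` versus `.desc()` ordering on the averaged score. A hypothetical refactor, not part of this commit, could fold both into one helper parameterized by direction:

```python
from sqlalchemy import func

# Model and Score are the ORM classes already imported in score.py;
# they are assumed here rather than redefined.

def get_extreme_main_score_by_task(session, task_id: int, datasets: list,
                                   direction: int = 1):
    """Return the best (name, avg perf) row for a task; direction=-1
    selects the lowest average instead of the highest."""
    avg_perf = func.avg(Score.perf).label("perf")
    order = avg_perf.asc() if direction == -1 else avg_perf.desc()
    return (
        session.query(Model.name, avg_perf)
        .filter(Score.did.in_(datasets))
        .filter(Score.mid == Model.id)
        .filter(Model.tid == task_id)
        .filter(Model.is_published)
        .group_by(Model.id)
        .order_by(order)
        .first()
    )
```

This keeps the grouping and publication filters in one place, so the min and max paths cannot drift apart.
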
2 changes: 1 addition & 1 deletion (PrincipalTaskStats frontend component)

```diff
@@ -65,7 +65,7 @@ const PrincipalTaskStats: FC<PrincipalTaskStatsProps> = ({
           {maxScore!.toFixed(2)}
         </h6>
         <p className="text-sm font-medium tracking-widest text-white uppercase ">
-          Max score
+          Top score
         </p>
       </div>
     )}
```
