Commit

Some fixes
MaxiBoether committed Sep 23, 2024
1 parent 5cd0131 commit 5f23bc8
Showing 3 changed files with 11 additions and 7 deletions.
11 changes: 5 additions & 6 deletions modyn/evaluator/internal/grpc/evaluator_grpc_servicer.py
@@ -299,16 +299,16 @@ def get_evaluation_result(
         logger.info(f"Received get evaluation result request for evaluation {evaluation_id}.")
 
         if evaluation_id not in self._evaluation_dict:
-            logger.error(f"Evaluation with id {evaluation_id} has not been registered.")
+            logger.error(f"Evaluation {evaluation_id} has not been registered.")
             return EvaluationResultResponse(valid=False)
 
         self._drain_result_queue(evaluation_id)  # Should already be drained, but just make sure
 
         if self._evaluation_process_dict[evaluation_id].process_handler.is_alive():
-            logger.error(f"Evaluation with id {evaluation_id} is still running.")
+            logger.error(f"Evaluation {evaluation_id} is still running.")
             return EvaluationResultResponse(valid=False)
 
-        logger.info("Returning results of all metrics.")
+        logger.info(f"[Evaluation {evaluation_id}] Returning results of all metrics.")
         self._drain_result_queue(evaluation_id)  # Should not do anything, but let's make sure
 
         evaluation_data: list[EvaluationIntervalData] = []
@@ -317,12 +317,11 @@ def get_evaluation_result(
             single_eval_data = EvaluationIntervalData(interval_index=interval_idx, evaluation_data=metric_result)
             evaluation_data.append(single_eval_data)
 
-        num_metrics = len(self._evaluation_dict[evaluation_id].raw_metrics)
-        expected_results = len(self._evaluation_dict[evaluation_id].not_failed_interval_ids) * num_metrics
+        expected_results = len(self._evaluation_dict[evaluation_id].not_failed_interval_ids)
         if len(evaluation_data) < expected_results:
             logger.error(
                 f"Could not retrieve results for all intervals of evaluation {evaluation_id}. "
-                f"Expected {len(self._evaluation_dict[evaluation_id].not_failed_interval_ids)} * {num_metrics} = {expected_results} results, "
+                f"Expected {expected_results} results, "
                 f"but got {len(evaluation_data)} results. Most likely, an exception happened during evaluation."
             )
             return EvaluationResultResponse(valid=False)
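For context on the second hunk: the corrected check counts one `EvaluationIntervalData` entry per non-failed interval rather than one per interval-metric pair, which suggests each drained result now bundles all metric results for its interval. Below is a minimal, hedged sketch of that corrected completeness check; `EvaluationInfo` and `results_complete` are hypothetical stand-ins for the servicer's bookkeeping in `self._evaluation_dict`, and results are plain dicts instead of protobuf messages.

```python
# Illustrative sketch only, not the servicer's actual code.
from dataclasses import dataclass, field


@dataclass
class EvaluationInfo:  # hypothetical stand-in for the servicer's per-evaluation state
    not_failed_interval_ids: list[int] = field(default_factory=list)
    raw_metrics: list[str] = field(default_factory=list)


def results_complete(info: EvaluationInfo, evaluation_data: list[dict]) -> bool:
    # One result entry is expected per non-failed interval, regardless of how
    # many metrics were evaluated for that interval.
    expected_results = len(info.not_failed_interval_ids)
    return len(evaluation_data) >= expected_results


info = EvaluationInfo(not_failed_interval_ids=[0, 1, 2], raw_metrics=["Accuracy", "F1Score"])
print(results_complete(info, [{"interval_index": 0}, {"interval_index": 1}]))  # False: one interval missing
print(results_complete(info, [{"interval_index": i} for i in range(3)]))       # True
```
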
3 changes: 3 additions & 0 deletions modyn/supervisor/internal/grpc_handler.py
@@ -317,6 +317,7 @@ def wait_for_evaluation_completion(self, evaluation_id: int) -> bool:
                 break  # Exit busy wait
 
             if not res.is_running:
+                logger.info(f"Evaluation {evaluation_id} has finished successfully.")
                 break  # Exit busy wait
 
             sleep(1)
@@ -348,6 +349,8 @@ def get_evaluation_results(self, evaluation_id: int) -> list[EvaluationIntervalData]:
             logger.error(_msg)
             raise RuntimeError(_msg)
 
+        logger.debug(f"Obtained evaluation results for evaluation {evaluation_id}")
+
         return res.evaluation_results
 
     def cleanup_evaluations(self, evaluation_ids: list[int]) -> None:
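For context on the first hunk: `wait_for_evaluation_completion` busy-waits on the evaluator, sleeping one second between status checks, and now logs when the evaluation finishes. A minimal sketch of that polling pattern follows; `get_status` is a hypothetical stand-in for the gRPC status call, and the real handler also deals with invalid responses and exception results.

```python
# Illustrative polling loop in the spirit of wait_for_evaluation_completion (sketch, not the real handler).
import logging
from time import sleep
from types import SimpleNamespace

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


def wait_for_completion(evaluation_id: int, get_status) -> bool:
    while True:
        res = get_status(evaluation_id)  # hypothetical status call returning an object with .is_running
        if not res.is_running:
            logger.info(f"Evaluation {evaluation_id} has finished successfully.")
            return True
        sleep(1)  # busy wait: pause one second before polling again


# Example usage with a fake status source that reports "finished" on the third poll:
_polls = iter([True, True, False])
print(wait_for_completion(42, lambda _id: SimpleNamespace(is_running=next(_polls))))  # True (after ~2s)
```
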
4 changes: 3 additions & 1 deletion
@@ -378,7 +378,9 @@ def get_failure_reason(eval_aborted_reason: EvaluationAbortedReason) -> str:
                 # This is likely due to an invalid request in the first place.
                 return failure_reasons
 
-            logger.info(f"Evaluation started for model {model_id_to_eval} on intervals {intervals}.")
+            logger.info(
+                f"Evaluation {response.evaluation_id} started for model {model_id_to_eval} on intervals {intervals}."
+            )
             started_evaluations.append(response.evaluation_id)
             if not self.grpc.wait_for_evaluation_completion(response.evaluation_id):
                 raise RuntimeError("There was an exception during evaluation")  # Trigger retry
