make it easier to debug
semio committed Sep 10, 2023
1 parent 27cb7d5 commit 6420a82
Showing 2 changed files with 14 additions and 5 deletions.
automation-api/lib/llms/spark.py (4 additions, 1 deletion)
@@ -12,6 +12,7 @@
     stop_after_attempt,
 )
 
+from lib.app_singleton import app_logger as logger
 from lib.config import read_config
 from lib.llms.iflytek import SparkClient
@@ -88,7 +89,9 @@ def _call(
     ) -> str:
         if stop is not None:
             raise ValueError("stop kwargs are not permitted.")
-        return self.generate_text_with_retry(prompt)
+        output = self.generate_text_with_retry(prompt)
+        logger.debug(f"Spark: {output}")
+        return output
 
     @property
     def _identifying_params(self) -> Mapping[str, Any]:
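
The spark.py change taps the model output just before _call returns: every raw Spark completion now lands in the log at DEBUG level, so a bad run can be traced back to the exact completion. A minimal sketch of the pattern, with a hypothetical stand-in class (the real subclass wraps SparkClient with tenacity retries):

import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("app")

class SparkLLM:  # hypothetical stand-in for the LangChain subclass in spark.py
    def generate_text_with_retry(self, prompt: str) -> str:
        # the real method calls SparkClient and retries via tenacity
        return f"echo: {prompt}"

    def _call(self, prompt: str, stop=None) -> str:
        if stop is not None:
            raise ValueError("stop kwargs are not permitted.")
        output = self.generate_text_with_retry(prompt)
        logger.debug(f"Spark: {output}")  # log the raw completion before returning it
        return output

print(SparkLLM()._call("hello"))
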
automation-api/lib/pilot/cli.py (10 additions, 4 deletions)
@@ -85,11 +85,11 @@ def run_evaluation(
         logger.warning(
             f"({model_config_id}, {prompt_var_id}, {survey_id}) has been evaluated."
         )
 
     session_df = pd.DataFrame.from_records(session_result)
     session_df = SessionResultsDf.validate(session_df)
     # write result to tmp file.
     session_df.to_csv(out_file_path, index=False)
     logger.info(f"session saved to {out_file_path}")
 
     return session_df

@@ -143,7 +143,10 @@ def run_evaluation(
     survey_id = get_survey_hash(questions)
     survey = (survey_id, questions)
 
-    eval_llm = get_model("gpt-3.5-turbo", "OpenAI", {"temperature": 0})
+    # FIXME: add support to set eval llm and parameters.
+    eval_llm = get_model(
+        "gpt-3.5-turbo", "OpenAI", {"temperature": 0, "request_timeout": 120}
+    )
 
     search_space = list(product(model_configs, prompt_variants))

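The added request_timeout is the debugging-relevant part of this hunk: without a client-side timeout, one hung HTTP call can stall an evaluation worker indefinitely, whereas 120 seconds turns it into a visible failure. Assuming get_model forwards the parameter dict as keyword arguments to LangChain's ChatOpenAI (an assumption about this repo's helper), the call is roughly equivalent to:

from langchain.chat_models import ChatOpenAI

eval_llm = ChatOpenAI(
    model_name="gpt-3.5-turbo",
    temperature=0,        # deterministic output for grading
    request_timeout=120,  # give up after 120s instead of hanging
)
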
@@ -156,8 +159,11 @@
         out_dir=args.tmp_dir,
     )
 
-    with Pool(args.jobs) as p:
-        session_dfs = p.map(threaded_func, search_space)
+    if args.jobs == 1:
+        session_dfs = [threaded_func(v) for v in search_space]
+    else:
+        with Pool(args.jobs) as p:
+            session_dfs = p.map(threaded_func, search_space)
 
     try:
         session_df = pd.concat(session_dfs)
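
The args.jobs == 1 branch is what the commit title points at: a plain list comprehension runs every evaluation in the main process, where pdb breakpoints fire and exceptions keep their full tracebacks, while Pool.map hands the work to worker processes that make both awkward. The dispatch pattern in isolation (names here are illustrative, not the repo's):

from multiprocessing import Pool

def square(x: int) -> int:
    return x * x

def run_all(func, items, jobs: int = 1):
    if jobs == 1:
        return [func(v) for v in items]  # in-process: debugger-friendly
    with Pool(jobs) as p:
        return p.map(func, items)        # worker processes: parallel, harder to debug

if __name__ == "__main__":
    print(run_all(square, range(5), jobs=1))  # serial
    print(run_all(square, range(5), jobs=2))  # parallel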
