Fix/configs and llm resp selection (#476)
* fix: rename goals prompt

* fix: send hyps to llm based resp selector

* fix: send configs and default values

* fix: configs

* fix: extra file

* fix: extra imports
dilyararimovna authored May 29, 2023
1 parent d9e9a05 commit 633cd43
Showing 5 changed files with 21 additions and 20 deletions.
common/prompts.py (2 changes: 1 addition & 1 deletion)
@@ -8,7 +8,7 @@
 logger = logging.getLogger(__name__)

 with open("common/prompts/goals_for_prompts.json", "r") as f:
-    META_PROMPT = json.load(f)["prompt"]
+    META_GOALS_PROMPT = json.load(f)["prompt"]


 def send_request_to_prompted_generative_service(dialog_context, prompt, url, config, timeout, sending_variables):
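For context, the signature shown above is what the response selector below now calls positionally. A minimal sketch of what such a helper plausibly does, assuming the generative services keep the batched `dialog_contexts`/`prompts`/`configs` request format that the selector previously built inline (the body here is an illustration, not the repository's actual implementation):

```python
import requests


def send_request_to_prompted_generative_service(dialog_context, prompt, url, config, timeout, sending_variables):
    # Wrap a single (context, prompt) pair into the batched request format
    # the generative service expects.
    response = requests.post(
        url,
        json={
            "dialog_contexts": [dialog_context],
            "prompts": [prompt],
            "configs": [config],
            **sending_variables,
        },
        timeout=timeout,
    )
    # The service answers with a batch of hypothesis lists, e.g. [["this is the response"]];
    # return the inner list so callers can index into it directly.
    return response.json()[0]
```

This reading matches the call-site change below, where `response.json()[0][0]` becomes `response[0]`.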
response_selectors/llm_based_response_selector/server.py (21 changes: 10 additions & 11 deletions)
@@ -3,13 +3,13 @@
 import json
 import logging
 import numpy as np
-import requests
 import time
 from copy import deepcopy
 from os import getenv

 import sentry_sdk
 from flask import Flask, request, jsonify
+from common.prompts import send_request_to_prompted_generative_service
 from common.utils import is_toxic_or_badlisted_utterance


@@ -62,18 +62,15 @@ def select_response_by_scores(hypotheses, scores):

 def select_response(dialog_context, hypotheses):
     try:
-        response = requests.post(
+        response = send_request_to_prompted_generative_service(
+            dialog_context,
+            PROMPT + "\nHypotheses:\n" + "\n".join([f'"{hyp["text"]}"' for hyp in hypotheses]),
             GENERATIVE_SERVICE_URL,
-            json={
-                "dialog_contexts": [dialog_context],
-                "prompts": [PROMPT],
-                "configs": [GENERATIVE_SERVICE_CONFIG],
-                **sending_variables,
-            },
-            timeout=GENERATIVE_TIMEOUT,
+            GENERATIVE_SERVICE_CONFIG,
+            GENERATIVE_TIMEOUT,
+            sending_variables,
         )
-        # batch of a list of one string [["this is the response"]]
-        result = response.json()[0][0]
+        result = response[0]
     except Exception as e:
         sentry_sdk.capture_exception(e)
         logger.exception(e)
@@ -114,6 +111,7 @@ def respond():
             selected_bot_attributes.append(hypotheses[best_id].pop("bot_attributes", {}))
             hypotheses[best_id].pop("annotations", {})
             selected_attributes.append(hypotheses[best_id])
+
         except Exception as e:
             sentry_sdk.capture_exception(e)
             logger.exception(e)
@@ -122,6 +120,7 @@
                 "Selected a response with the highest confidence."
             )
             selected_resp, best_id = select_response_by_scores(hypotheses, [hyp["confidence"] for hyp in hypotheses])
+
         selected_responses.append(hypotheses[best_id].pop("text"))
         selected_skill_names.append(hypotheses[best_id].pop("skill_name"))
         selected_confidences.append(hypotheses[best_id].pop("confidence"))
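The net effect of the hunks above is that candidate hypotheses now travel inside the prompt text rather than as separate payload fields. A small illustration of the composed string, with made-up hypothesis texts and a hypothetical `PROMPT` value:

```python
PROMPT = "Select the best response for the dialog below."  # hypothetical prompt text
hypotheses = [{"text": "Hi, how can I help?"}, {"text": "The weather is nice today."}]

# Same composition as the new call in select_response above.
composed = PROMPT + "\nHypotheses:\n" + "\n".join([f'"{hyp["text"]}"' for hyp in hypotheses])
print(composed)
# Select the best response for the dialog below.
# Hypotheses:
# "Hi, how can I help?"
# "The weather is nice today."
```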
services/openai_api_lm/server.py (7 changes: 4 additions & 3 deletions)
@@ -5,7 +5,7 @@

 import openai
 import sentry_sdk
-from common.prompts import META_PROMPT
+from common.prompts import META_GOALS_PROMPT
 from common.universal_templates import GENERATIVE_ROBOT_TEMPLATE
 from flask import Flask, request, jsonify
 from sentry_sdk.integrations.flask import FlaskIntegration
@@ -96,7 +96,8 @@ def respond():
     st_time = time.time()
     contexts = request.json.get("dialog_contexts", [])
     prompts = request.json.get("prompts", [])
-    configs = request.json.get("configs", [])
+    configs = request.json.get("configs", None)
+    configs = [None] * len(prompts) if configs is None else configs
     configs = [DEFAULT_CONFIGS[PRETRAINED_MODEL_NAME_OR_PATH] if el is None else el for el in configs]
     if len(contexts) > 0 and len(prompts) == 0:
         prompts = [""] * len(contexts)
@@ -144,7 +145,7 @@ def generate_goals():
     try:
         responses = []
         for openai_api_key, openai_org, prompt, config in zip(openai_api_keys, openai_orgs, prompts, configs):
-            context = ["hi", META_PROMPT + f"\nPrompt: '''{prompt}'''\nResult:"]
+            context = ["hi", META_GOALS_PROMPT + f"\nPrompt: '''{prompt}'''\nResult:"]
             goals_for_prompt = generate_responses(context, openai_api_key, openai_org, "", config)[0]
             logger.info(f"Generated goals: `{goals_for_prompt}` for prompt: `{prompt}`")
             responses += [goals_for_prompt]
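The two-step default above is the substance of the "fix: send configs and default values" commit: a request with no `configs` field, an explicit `None`, or per-element `None` entries all fall back to the model's default generation config instead of yielding an empty list. A quick sketch of the behavior, with stand-in values for `DEFAULT_CONFIGS` and the model name:

```python
PRETRAINED_MODEL_NAME_OR_PATH = "gpt-3.5-turbo"  # stand-in model name
DEFAULT_CONFIGS = {"gpt-3.5-turbo": {"max_tokens": 64}}  # stand-in defaults


def resolve_configs(configs, prompts):
    # Mirrors the request handling above: a missing list becomes one None
    # per prompt, then every None is replaced by the model's default config.
    configs = [None] * len(prompts) if configs is None else configs
    return [DEFAULT_CONFIGS[PRETRAINED_MODEL_NAME_OR_PATH] if el is None else el for el in configs]


print(resolve_configs(None, ["p1", "p2"]))
# [{'max_tokens': 64}, {'max_tokens': 64}]
print(resolve_configs([None, {"max_tokens": 8}], ["p1", "p2"]))
# [{'max_tokens': 64}, {'max_tokens': 8}]
```

The same pattern is applied verbatim in services/transformers_lm/server.py below.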
services/transformers_lm/server.py (7 changes: 4 additions & 3 deletions)
@@ -9,7 +9,7 @@
 from sentry_sdk.integrations.flask import FlaskIntegration
 from transformers import AutoModelForCausalLM, AutoTokenizer

-from common.prompts import META_PROMPT
+from common.prompts import META_GOALS_PROMPT
 from common.universal_templates import GENERATIVE_ROBOT_TEMPLATE


@@ -120,7 +120,8 @@ def respond():
     st_time = time.time()
     contexts = request.json.get("dialog_contexts", [])
     prompts = request.json.get("prompts", [])
-    configs = request.json.get("configs", [])
+    configs = request.json.get("configs", None)
+    configs = [None] * len(prompts) if configs is None else configs
     configs = [DEFAULT_CONFIGS[PRETRAINED_MODEL_NAME_OR_PATH] if el is None else el for el in configs]
     if len(contexts) > 0 and len(prompts) == 0:
         prompts = [""] * len(contexts)
@@ -161,7 +162,7 @@ def generate_goals():
     try:
         responses = []
         for prompt, config in zip(prompts, configs):
-            context = ["hi", META_PROMPT + f"\nPrompt: '''{prompt}'''\nResult:"]
+            context = ["hi", META_GOALS_PROMPT + f"\nPrompt: '''{prompt}'''\nResult:"]
             goals_for_prompt = generate_responses(context, model, tokenizer, "", config)[0]
             logger.info(f"Generated goals: `{goals_for_prompt}` for prompt: `{prompt}`")
             responses += [goals_for_prompt]
services/transformers_peft_lm/server.py (4 changes: 2 additions & 2 deletions)
@@ -10,7 +10,7 @@
 from sentry_sdk.integrations.flask import FlaskIntegration
 from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig

-from common.prompts import META_PROMPT
+from common.prompts import META_GOALS_PROMPT
 from common.universal_templates import GENERATIVE_ROBOT_TEMPLATE


@@ -146,7 +146,7 @@ def generate_goals():
     try:
         responses = []
         for prompt in prompts:
-            context = ["hi", META_PROMPT + f"\nPrompt: '''{prompt}'''\nResult:"]
+            context = ["hi", META_GOALS_PROMPT + f"\nPrompt: '''{prompt}'''\nResult:"]
             goals_for_prompt = generate_responses(context, model, tokenizer, "")[0]
             logger.info(f"Generated goals: `{goals_for_prompt}` for prompt: `{prompt}`")
             responses += [goals_for_prompt]
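All three LM services build the same two-turn context in `generate_goals`: a dummy user turn plus the renamed meta-prompt wrapping the skill's prompt. A worked illustration with a stand-in meta-prompt (the real text lives in common/prompts/goals_for_prompts.json):

```python
META_GOALS_PROMPT = "List the goals a skill with the following prompt can pursue."  # stand-in text

prompt = "You are a friendly barista."
context = ["hi", META_GOALS_PROMPT + f"\nPrompt: '''{prompt}'''\nResult:"]
# context[1] is:
# List the goals a skill with the following prompt can pursue.
# Prompt: '''You are a friendly barista.'''
# Result:
```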
