From 789bba67db47d5431bedf828c05a7aa3bec0fe77 Mon Sep 17 00:00:00 2001 From: "david.smith" Date: Fri, 27 Oct 2023 18:13:26 +0300 Subject: [PATCH] adding prompts to customise behaviour of application --- llm-server/integrations/custom_prompts/slack.py | 2 +- .../routes/workflow/extractors/extract_body.py | 13 +++++++++++-- .../routes/workflow/generate_openapi_payload.py | 5 +++-- llm-server/routes/workflow/utils/run_openapi_ops.py | 7 ++++++- llm-server/utils/make_api_call.py | 1 + 5 files changed, 22 insertions(+), 6 deletions(-) diff --git a/llm-server/integrations/custom_prompts/slack.py b/llm-server/integrations/custom_prompts/slack.py index f92522669..43c9a0bf3 100644 --- a/llm-server/integrations/custom_prompts/slack.py +++ b/llm-server/integrations/custom_prompts/slack.py @@ -1,3 +1,3 @@ flow_generation_prompts = "" -api_generation_prompt = "" +api_generation_prompt = "Use encoded value for channel" diff --git a/llm-server/routes/workflow/extractors/extract_body.py b/llm-server/routes/workflow/extractors/extract_body.py index 574d4cc6d..12e431d5e 100644 --- a/llm-server/routes/workflow/extractors/extract_body.py +++ b/llm-server/routes/workflow/extractors/extract_body.py @@ -6,6 +6,7 @@ from typing import Any, Optional from routes.workflow.extractors.extract_json import extract_json_payload from custom_types.t_json import JsonData +import importlib import logging openai_api_key = os.getenv("OPENAI_API_KEY") @@ -16,7 +17,7 @@ def gen_body_from_schema( body_schema: str, text: str, prev_api_response: str, - example: str, + app: Optional[str], current_state: Optional[str], ) -> Any: chat = ChatOpenAI( @@ -25,6 +26,11 @@ def gen_body_from_schema( temperature=0, ) + if app: + module_name = f"integrations.custom_prompts.{app}" + module = importlib.import_module(module_name) + api_generation_prompt = getattr(module, "api_generation_prompt") + messages = [ SystemMessage( content="You are an intelligent machine learning model that can produce REST API's body in json 
format" @@ -37,10 +43,13 @@ HumanMessage(content="prev api responses: {}".format(prev_api_response)), HumanMessage(content="current_state: {}".format(current_state)), HumanMessage( - content="Given the provided information, generate the appropriate minified JSON payload to use as body for the API request. Avoid using fields that are not required, and user input doesnot require it." + content="Given the provided information, generate the appropriate minified JSON payload to use as body for the API request. If a user doesn't provide a required parameter, use sensible defaults for required params, and leave optional params empty" ), ] + if app and api_generation_prompt is not None: + messages.append(HumanMessage(content="{}".format(api_generation_prompt))) + result = chat(messages) logging.info("[OpenCopilot] LLM Body Response: {}".format(result.content)) diff --git a/llm-server/routes/workflow/generate_openapi_payload.py b/llm-server/routes/workflow/generate_openapi_payload.py index 96e72f3a9..7fd36e089 100644 --- a/llm-server/routes/workflow/generate_openapi_payload.py +++ b/llm-server/routes/workflow/generate_openapi_payload.py @@ -85,6 +85,7 @@ def generate_openapi_payload( text: str, _operation_id: str, prev_api_response: str, + app: Optional[str], current_state: Optional[str], ) -> ApiInfo: ( @@ -119,12 +120,12 @@ ) if api_info.body_schema: - example = gen_ex_from_schema(api_info.body_schema) + # example = gen_ex_from_schema(api_info.body_schema) api_info.body_schema = gen_body_from_schema( json.dumps(api_info.body_schema, separators=(",", ":")), text, prev_api_response, - example, + app, current_state, ) diff --git a/llm-server/routes/workflow/utils/run_openapi_ops.py b/llm-server/routes/workflow/utils/run_openapi_ops.py index fd99d036b..fccd20547 100644 --- a/llm-server/routes/workflow/utils/run_openapi_ops.py +++ b/llm-server/routes/workflow/utils/run_openapi_ops.py @@ -32,7 +32,12 @@ # 
refresh state after every api call, we can look into optimizing this later as well operation_id = step.get("open_api_operation_id") api_payload = generate_openapi_payload( - swagger_json, text, operation_id, prev_api_response, current_state + swagger_json, + text, + operation_id, + prev_api_response, + app, + current_state, ) api_response = make_api_request(headers=headers, **api_payload.__dict__) diff --git a/llm-server/utils/make_api_call.py b/llm-server/utils/make_api_call.py index abcfe46e8..5df23edd5 100644 --- a/llm-server/utils/make_api_call.py +++ b/llm-server/utils/make_api_call.py @@ -35,6 +35,7 @@ def make_api_request( ) -> Response: try: endpoint = replace_url_placeholders(endpoint, path_params) + print(f"Endpoint: {endpoint}") url = servers[0] + endpoint # Create a session and configure it with headers session = requests.Session()