Merge pull request #13 from martynov-dm/martynov_complex_graph_gen
Martynov Complex Graph Generation algorithm
Also added a `model` parameter to the `TopicGraphGenerator.invoke` signature
NotBioWaste905 authored Dec 23, 2024
2 parents 0e05fb1 + 331ab7a commit bd6090f
Showing 8 changed files with 3,170 additions and 66 deletions.
@@ -3,6 +3,7 @@
import abc
from chatsky_llm_autoconfig.graph import BaseGraph
from chatsky_llm_autoconfig.dialogue import Dialogue
from langchain_core.language_models.chat_models import BaseChatModel


class BaseAlgorithm(BaseModel, abc.ABC):
@@ -71,7 +72,7 @@ async def ainvoke(self, topic: str, graph: BaseGraph) -> BaseGraph:
class TopicGraphGenerator(BaseAlgorithm):
"""Graph generator that works only with topics."""

def invoke(self, topic: str) -> BaseGraph:
def invoke(self, topic: str, model: BaseChatModel) -> BaseGraph:
raise NotImplementedError

async def ainvoke(self, topic: str) -> BaseGraph:
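With this signature change, callers construct the chat model themselves and pass it into `invoke` instead of the generator instantiating one internally. A minimal call-site sketch, assuming an OpenAI-compatible endpoint and `OPENAI_API_KEY` in the environment (the topic string is illustrative):

```python
from langchain_openai import ChatOpenAI

from chatsky_llm_autoconfig.algorithms.topic_graph_generation import CycleGraphGenerator

# The caller now owns model construction; any BaseChatModel implementation works.
model = ChatOpenAI(model="gpt-4o", temperature=0)
generator = CycleGraphGenerator()
graph = generator.invoke(topic="coffee shop ordering", model=model)
```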
@@ -2,92 +2,90 @@
from chatsky_llm_autoconfig.algorithms.base import TopicGraphGenerator
from chatsky_llm_autoconfig.autometrics.registry import AlgorithmRegistry
from chatsky_llm_autoconfig.schemas import DialogueGraph
from langchain_openai import ChatOpenAI

from langchain.prompts import PromptTemplate
from langchain_core.output_parsers import JsonOutputParser

from chatsky_llm_autoconfig.graph import BaseGraph, Graph
import os
from langchain_core.language_models.chat_models import BaseChatModel

from pydantic import SecretStr
from pydantic import Field
from typing import ClassVar


@AlgorithmRegistry.register(input_type=str, output_type=BaseGraph)
class CycleGraphGenerator(TopicGraphGenerator):
"""Generator specifically for topic-based cyclic graphs"""

prompt: str = ""
cycle_graph_generation_prompt: str = ""
DEFAULT_TEMPLATE: ClassVar[str] = """
Create a complex dialogue graph where the conversation MUST return to an existing node.
**CRITICAL: Response Specificity**
Responses must acknowledge and build upon what the user has already specified:
INCORRECT flow:
- User: "I'd like to order a coffee"
- Staff: "What would you like to order?" (TOO GENERAL - ignores that they specified coffee)
CORRECT flow:
- User: "I'd like to order a coffee"
- Staff: "What kind of coffee would you like?" (GOOD - acknowledges they want coffee)
Example of a CORRECT cyclic graph for a coffee shop:
"edges": [
{{ "source": 1, "target": 2, "utterances": ["Hi, I'd like to order a coffee"] }},
{{ "source": 2, "target": 3, "utterances": ["A large latte please"] }},
{{ "source": 3, "target": 4, "utterances": ["Yes, that's correct"] }},
{{ "source": 4, "target": 5, "utterances": ["Here's my payment"] }},
{{ "source": 5, "target": 2, "utterances": ["I'd like to order another coffee"] }}
],
"nodes": [
{{ "id": 1, "label": "welcome", "is_start": true, "utterances": ["Welcome! How can I help you today?"] }},
{{ "id": 2, "label": "ask_coffee_type", "is_start": false, "utterances": ["What kind of coffee would you like?"] }},
{{ "id": 3, "label": "confirm", "is_start": false, "utterances": ["That's a large latte. Is this correct?"] }},
{{ "id": 4, "label": "payment", "is_start": false, "utterances": ["Great! That'll be $5. Please proceed with payment."] }},
{{ "id": 5, "label": "completed", "is_start": false, "utterances": ["Thank you! Would you like another coffee?"] }}
]
**Rules:**
1) Responses must acknowledge what the user has already specified
2) The final node MUST connect back to an existing node
3) Each node must have clear purpose
4) Return ONLY the JSON without commentary
5) Graph must be cyclic - no dead ends
6) All edges must connect to existing nodes
7) The cycle point should make logical sense
**Your task is to create a cyclic dialogue graph about the following topic:** {topic}.
"""

cycle_graph_generation_prompt: PromptTemplate = Field(
default_factory=lambda: PromptTemplate.from_template(CycleGraphGenerator.DEFAULT_TEMPLATE)
)

def __init__(self, prompt: Optional[PromptTemplate] = None):
super().__init__()
self.cycle_graph_generation_prompt = (
prompt
if prompt
else PromptTemplate.from_template(
"""
Create a cyclic dialogue graph where the conversation MUST return to an existing node.
**CRITICAL: Response Specificity**
Responses must acknowledge and build upon what the user has already specified:
INCORRECT flow:
- User: "I'd like to order a coffee"
- Staff: "What would you like to order?" (TOO GENERAL - ignores that they specified coffee)
CORRECT flow:
- User: "I'd like to order a coffee"
- Staff: "What kind of coffee would you like?" (GOOD - acknowledges they want coffee)
Example of a CORRECT cyclic graph for a coffee shop:
"edges": [
{{ "source": 1, "target": 2, "utterances": ["Hi, I'd like to order a coffee"] }},
{{ "source": 2, "target": 3, "utterances": ["A large latte please"] }},
{{ "source": 3, "target": 4, "utterances": ["Yes, that's correct"] }},
{{ "source": 4, "target": 5, "utterances": ["Here's my payment"] }},
{{ "source": 5, "target": 2, "utterances": ["I'd like to order another coffee"] }}
],
"nodes": [
{{ "id": 1, "label": "welcome", "is_start": true, "utterances": ["Welcome! How can I help you today?"] }},
{{ "id": 2, "label": "ask_coffee_type", "is_start": false, "utterances": ["What kind of coffee would you like?"] }},
{{ "id": 3, "label": "confirm", "is_start": false, "utterances": ["That's a large latte. Is this correct?"] }},
{{ "id": 4, "label": "payment", "is_start": false, "utterances": ["Great! That'll be $5. Please proceed with payment."] }},
{{ "id": 5, "label": "completed", "is_start": false, "utterances": ["Thank you! Would you like another coffee?"] }}
]
**Rules:**
1) Responses must acknowledge what the user has already specified
2) The final node MUST connect back to an existing node
3) Each node must have clear purpose
4) Return ONLY the JSON without commentary
5) Graph must be cyclic - no dead ends
6) All edges must connect to existing nodes
7) The cycle point should make logical sense
**Your task is to create a cyclic dialogue graph about the following topic:** {topic}.
"""
)
)
if prompt is not None:
self.cycle_graph_generation_prompt = prompt

def invoke(self, topic: str) -> BaseGraph:
def invoke(self, topic: str, model: BaseChatModel) -> BaseGraph:
"""
Generate a cyclic dialogue graph based on the topic input.
:param input_data: TopicInput containing the topic
:return: Generated Graph object with cyclic structure
Args:
topic (str): The topic for the dialogue graph
model (BaseChatModel): The chat model to use for generation
Returns:
BaseGraph: Generated Graph object with cyclic structure
"""
parser = JsonOutputParser(pydantic_object=DialogueGraph)
model = ChatOpenAI(model="gpt-4o", api_key=SecretStr(os.getenv("OPENAI_API_KEY") or ""), base_url=os.getenv("OPENAI_BASE_URL"), temperature=0)

chain = self.cycle_graph_generation_prompt | model | parser

generated_graph = chain.invoke({"topic": topic})

return Graph(generated_graph)

async def ainvoke(self, *args, **kwargs):
"""
Async version of invoke - to be implemented
"""
pass


if __name__ == "__main__":
cycle_graph_generator = CycleGraphGenerator()
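The body of `invoke` is a plain LCEL composition: the prompt template fills in `{topic}`, the model returns JSON text, and `JsonOutputParser` parses it into a dict (the `DialogueGraph` pydantic object supplies the expected shape), which `Graph` then wraps. A stripped-down sketch of the same `prompt | model | parser` shape — the toy prompt here is illustrative, not the committed template:

```python
from langchain.prompts import PromptTemplate
from langchain_core.output_parsers import JsonOutputParser
from langchain_openai import ChatOpenAI

# Same pipeline shape as CycleGraphGenerator.invoke, minus the schema object.
prompt = PromptTemplate.from_template("Return a JSON dialogue graph about {topic}.")
model = ChatOpenAI(model="gpt-4o", temperature=0)
chain = prompt | model | JsonOutputParser()
graph_dict = chain.invoke({"topic": "pizza ordering"})  # dict with "nodes" and "edges"
```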
@@ -367,7 +367,7 @@
],
"metadata": {
"kernelspec": {
"display_name": "dff-llm-integration-VcuUrJCU-py3.12",
"display_name": ".venv",
"language": "python",
"name": "python3"
},
@@ -381,7 +381,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.3"
"version": "3.12.7"
}
},
"nbformat": 4,
369 changes: 369 additions & 0 deletions experiments/2024.12.10_complex_graph_gen/complex_graph_gen.ipynb
@@ -0,0 +1,369 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"True"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_openai import ChatOpenAI\n",
"from dotenv import load_dotenv\n",
"import os\n",
"load_dotenv() "
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Test Graph Creation"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"from langchain.prompts import PromptTemplate\n",
"\n",
"simple_graph_prompt = PromptTemplate.from_template(\n",
" \"\"\"\n",
"Create a dialogue graph for a {topic} conversation that follows these rules:\n",
"\n",
"1. Each assistant message (node) must be a clear question or statement that expects a specific type of response\n",
"2. Each user message (edge) must directly answer or respond to the previous assistant message\n",
"3. Include these basic flows:\n",
" - Main success path (completing the primary task)\n",
" - Early exit path (user decides not to proceed)\n",
" - Return path (user wants to modify earlier choice)\n",
"\n",
"Example of correct flow:\n",
"Assistant: \"What type of coffee would you like?\"\n",
"User: \"A latte please\"\n",
"Assistant: \"Would you like that hot or iced?\"\n",
"User: \"Hot please\"\n",
"\n",
"Example of incorrect flow:\n",
"Assistant: \"What type of coffee would you like?\"\n",
"User: \"No thank you\" (This response doesn't match the question)\n",
"\n",
"Format:\n",
"{{\n",
" \"edges\": [\n",
" {{ \"source\": 1, \"target\": 2, \"utterances\": [\"I'd like a coffee\"] }},\n",
" {{ \"source\": 2, \"target\": 3, \"utterances\": [\"A latte please\"] }}\n",
" ],\n",
" \"nodes\": [\n",
" {{ \"id\": 1, \"label\": \"welcome\", \"is_start\": true, \"utterances\": [\"Welcome! How can I help?\"] }},\n",
" {{ \"id\": 2, \"label\": \"ask_drink\", \"is_start\": false, \"utterances\": [\"What would you like to drink?\"] }}\n",
" ]\n",
"}}\n",
"\n",
"Return ONLY the JSON without any additional text.\n",
"\"\"\"\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
"from chatsky_llm_autoconfig.algorithms.dialogue_generation import DialogueSampler\n",
"from chatsky_llm_autoconfig.algorithms.topic_graph_generation import CycleGraphGenerator\n",
"from chatsky_llm_autoconfig.metrics.llm_metrics import are_triplets_valid\n",
"from langchain_openai import ChatOpenAI\n",
"from pathlib import Path\n",
"import os\n",
"import json\n",
"from datetime import datetime\n",
"\n",
"\n",
"def generate_valid_dialogues(topics: list[str], output_dir: str = \"generated_datasets\") -> None:\n",
" # Setup\n",
" Path(output_dir).mkdir(exist_ok=True)\n",
" \n",
" # Initialize with default prompt if complex_graph_prompt is not defined\n",
" graph_generator = CycleGraphGenerator(prompt=simple_graph_prompt) # Remove prompt parameter if not needed\n",
" sampler = DialogueSampler()\n",
" gen_model = ChatOpenAI(\n",
" model='o1-mini',\n",
" api_key=os.getenv(\"OPENAI_API_KEY\"),\n",
" base_url=os.getenv(\"OPENAI_BASE_URL\"),\n",
" )\n",
" \n",
" valid_model = ChatOpenAI(\n",
" model='gpt-4o',\n",
" api_key=os.getenv(\"OPENAI_API_KEY\"),\n",
" base_url=os.getenv(\"OPENAI_BASE_URL\"),\n",
" )\n",
" \n",
" valid_results = []\n",
" \n",
" # Generate and validate graphs\n",
" for topic in topics:\n",
" try:\n",
" # Generate and validate using the model name from your ChatOpenAI instance\n",
" graph = graph_generator.invoke(topic=topic, model=gen_model)\n",
" validation_result = are_triplets_valid(graph, valid_model)\n",
" \n",
" print(f\"Topic: {topic}\")\n",
" print(f\"Validation result: {validation_result}\")\n",
" \n",
" # If valid, sample dialogue and save\n",
" if validation_result[\"value\"]:\n",
" sampled_dialogues = sampler.invoke(graph, 1, -1)\n",
" valid_results.append({\n",
" \"graph\": graph.graph_dict,\n",
" \"topic\": topic,\n",
" \"dialogues\": [d.model_dump() for d in sampled_dialogues],\n",
" \"validation_result\": validation_result\n",
" })\n",
" print(f\"✅ Valid dialogue generated for topic: {topic}\")\n",
" else:\n",
" print(f\"❌ Invalid dialogue for topic: {topic}\")\n",
" \n",
" except Exception as e:\n",
" print(f\"Error processing {topic}: {str(e)}\")\n",
" continue\n",
"\n",
" # Save results\n",
" if valid_results:\n",
" output_file = Path(output_dir) / f\"valid_dialogues_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json\"\n",
" with open(output_file, 'w', encoding='utf-8') as f:\n",
" json.dump(valid_results, f, ensure_ascii=False, indent=2)\n",
" print(f\"\\nSaved {len(valid_results)} valid dialogues to: {output_file}\")\n",
"\n",
"\n",
"\n",
" "
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"# Run generation\n",
"topics = [\n",
" \"medical appointment scheduling\",\n",
" \"food delivery service\",\n",
" \"fitness membership registration\",\n",
" \"apartment rental application\",\n",
" \"tech support assistance\",\n",
" \"travel package booking\",\n",
" \"insurance policy purchase\",\n",
" \"pet grooming service\",\n",
" \"moving service arrangement\",\n",
" \"home cleaning service\"\n",
"]\n",
"\n",
"\n",
"# generate_valid_dialogues(topics)"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"from chatsky_llm_autoconfig.algorithms.dialogue_generation import DialogueSampler\n",
"from chatsky_llm_autoconfig.graph import Graph\n",
"import json\n",
"from pathlib import Path\n",
"\n",
"# Initialize sampler\n",
"sampler = DialogueSampler()\n",
"\n",
"# Get all JSON files in the directory\n",
"dataset_dir = Path(\"generated_datasets\")\n",
"json_files = list(dataset_dir.glob(\"*.json\"))\n",
"\n",
"for json_path in json_files:\n",
" print(f\"\\nProcessing file: {json_path.name}\")\n",
" print(\"=\" * 80)\n",
" \n",
" # Load JSON file\n",
" with open(json_path, \"r\") as f:\n",
" data = json.load(f)\n",
"\n",
" # Process each graph\n",
" for entry in data:\n",
" print(f\"\\nTopic: {entry['topic']}\")\n",
" graph = Graph(entry[\"graph\"])\n",
" \n",
" # Sample dialogues\n",
" dialogues = sampler.invoke(graph, 1, -1)\n",
" \n",
" # Print all dialogues\n",
" for i, dialogue in enumerate(dialogues, 1):\n",
" print(f\"\\nDialogue {i}:\")\n",
" for message in dialogue.messages:\n",
" print(f\"- {message}\")\n",
" \n",
" print(\"-\" * 50)"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"Processing graphs from: graph_gen/graph_gen.json\n",
"\n",
"Validating graph with topic: library\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO:httpx:HTTP Request: POST http://193.187.173.33:8002/api/providers/openai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
"INFO:httpx:HTTP Request: POST http://193.187.173.33:8002/api/providers/openai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
"INFO:httpx:HTTP Request: POST http://193.187.173.33:8002/api/providers/openai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
"INFO:httpx:HTTP Request: POST http://193.187.173.33:8002/api/providers/openai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
"INFO:httpx:HTTP Request: POST http://193.187.173.33:8002/api/providers/openai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
"INFO:httpx:HTTP Request: POST http://193.187.173.33:8002/api/providers/openai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
"INFO:httpx:HTTP Request: POST http://193.187.173.33:8002/api/providers/openai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
"INFO:httpx:HTTP Request: POST http://193.187.173.33:8002/api/providers/openai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
"INFO:httpx:HTTP Request: POST http://193.187.173.33:8002/api/providers/openai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
"INFO:httpx:HTTP Request: POST http://193.187.173.33:8002/api/providers/openai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
"INFO:httpx:HTTP Request: POST http://193.187.173.33:8002/api/providers/openai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
"INFO:httpx:HTTP Request: POST http://193.187.173.33:8002/api/providers/openai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
"INFO:httpx:HTTP Request: POST http://193.187.173.33:8002/api/providers/openai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
"INFO:httpx:HTTP Request: POST http://193.187.173.33:8002/api/providers/openai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
"INFO:openai._base_client:Retrying request to /chat/completions in 0.386954 seconds\n",
"INFO:httpx:HTTP Request: POST http://193.187.173.33:8002/api/providers/openai/v1/chat/completions \"HTTP/1.1 200 OK\"\n",
"INFO:httpx:HTTP Request: POST http://193.187.173.33:8002/api/providers/openai/v1/chat/completions \"HTTP/1.1 200 OK\"\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Validation result: {'value': True, 'description': 'All transitions are valid.'}\n",
"✅ Valid dialogue generated for topic: library\n",
"\n",
"Saved 1 valid dialogues to: valid_complex_graphs/valid_dialogues_20241217_172116.json\n"
]
}
],
"source": [
"from chatsky_llm_autoconfig.algorithms.dialogue_generation import DialogueSampler\n",
"from chatsky_llm_autoconfig.graph import BaseGraph\n",
"from chatsky_llm_autoconfig.metrics.llm_metrics import are_triplets_valid\n",
"from langchain_openai import ChatOpenAI\n",
"from pathlib import Path\n",
"import os\n",
"import json\n",
"from datetime import datetime\n",
"\n",
"\n",
"def check_existing_graphs_and_sample() -> None:\n",
" sampler = DialogueSampler()\n",
" \n",
" valid_model = ChatOpenAI(\n",
" model='gpt-4o',\n",
" api_key=os.getenv(\"OPENAI_API_KEY\"),\n",
" base_url=os.getenv(\"OPENAI_BASE_URL\"),\n",
" )\n",
" \n",
" # Path to the directory containing generated datasets\n",
" datasets_dir = Path(\"graph_gen\")\n",
" \n",
" valid_results = []\n",
" \n",
" # Iterate through json files in the datasets directory\n",
" for file_path in datasets_dir.glob(\"*.json\"):\n",
" try:\n",
" # Load existing graph array\n",
" with open(file_path, 'r', encoding='utf-8') as f:\n",
" graphs_data = json.load(f)\n",
" \n",
" print(f\"\\nProcessing graphs from: {file_path}\")\n",
" \n",
" # Process each graph in the array\n",
" for graph_data in graphs_data:\n",
" print(f\"\\nValidating graph with topic: {graph_data['topic']}\")\n",
" \n",
" graph_obj: BaseGraph = Graph(graph_data['graph'])\n",
"\n",
" # Validate triplets\n",
" validation_result = are_triplets_valid(graph_obj, valid_model)\n",
" print(f\"Validation result: {validation_result}\")\n",
" \n",
" \n",
" if validation_result[\"value\"]:\n",
" sampled_dialogues = sampler.invoke(graph_obj, 1, -1)\n",
" valid_results.append({\n",
" \"graph\": graph_data['graph'],\n",
" \"topic\": graph_data['topic'],\n",
" \"dialogues\": [d.model_dump() for d in sampled_dialogues],\n",
" \"validation_result\": validation_result\n",
" })\n",
" print(f\"✅ Valid dialogue generated for topic: {graph_data['topic']}\")\n",
" else:\n",
" print(f\"❌ Invalid dialogue for topic: {graph_data['topic']}\")\n",
" \n",
" except Exception as e:\n",
" print(f\"Error processing {file_path}: {str(e)}\")\n",
" continue\n",
"\n",
" # Save results\n",
" if valid_results:\n",
" output_dir = Path(\"valid_complex_graphs\")\n",
" output_dir.mkdir(exist_ok=True)\n",
" \n",
" output_file = output_dir / f\"valid_dialogues_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json\"\n",
" with open(output_file, 'w', encoding='utf-8') as f:\n",
" json.dump(valid_results, f, ensure_ascii=False, indent=2)\n",
" print(f\"\\nSaved {len(valid_results)} valid dialogues to: {output_file}\")\n",
"\n",
"\n",
"if __name__ == \"__main__\":\n",
" check_existing_graphs_and_sample()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": ".venv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.7"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
219 changes: 219 additions & 0 deletions experiments/2024.12.10_complex_graph_gen/graph_gen/graph_gen.json
@@ -0,0 +1,219 @@
[
{
"graph": {
"nodes": [
{
"id": 1,
"label": "welcome",
"is_start": true,
"utterances": [
"Welcome to the City Library! How may I assist you today?"
]
},
{
"id": 2,
"label": "request_card_number",
"is_start": false,
"utterances": [
"Could you please provide your library card number?"
]
},
{
"id": 3,
"label": "ask_book_details",
"is_start": false,
"utterances": [
"What book are you looking for?"
]
},
{
"id": 4,
"label": "check_availability",
"is_start": false,
"utterances": [
"Let me check if the book is currently available."
]
},
{
"id": 11,
"label": "book_available",
"is_start": false,
"utterances": [
"I've checked and the book is available."
]
},
{
"id": 12,
"label": "book_unavailable",
"is_start": false,
"utterances": [
"I've checked and the book is currently unavailable."
]
},
{
"id": 5,
"label": "offer_alternatives",
"is_start": false,
"utterances": [
"Would you like to place a hold, or would you prefer to see similar books that are available now?"
]
},
{
"id": 6,
"label": "place_hold",
"is_start": false,
"utterances": [
"I've placed a hold on the book for you. You'll be notified by email when it becomes available. Would you like to search for any other books?"
]
},
{
"id": 7,
"label": "show_similar",
"is_start": false,
"utterances": [
"We have similar books available: 'This Side of Paradise' and 'The Beautiful and Damned'. Would you like to check out any of them?"
]
},
{
"id": 8,
"label": "checkout_process",
"is_start": false,
"utterances": [
"Would you like to check it out now?"
]
},
{
"id": 9,
"label": "confirm_checkout",
"is_start": false,
"utterances": [
"I've checked out the book to your account. It's due in 3 weeks. Would you like to search for any other books?"
]
},
{
"id": 10,
"label": "end",
"is_start": false,
"utterances": [
"Thank you for using our library services. Have a great day!"
]
}
],
"edges": [
{
"source": 1,
"target": 2,
"utterances": [
"I'd like to borrow a book"
]
},
{
"source": 2,
"target": 3,
"utterances": [
"My card number is L123456"
]
},
{
"source": 3,
"target": 4,
"utterances": [
"I'm looking for The Great Gatsby"
]
},
{
"source": 4,
"target": 11,
"utterances": [
"Okay, please check"
]
},
{
"source": 4,
"target": 12,
"utterances": [
"Okay, please check"
]
},
{
"source": 11,
"target": 8,
"utterances": [
"Great, I'd like to proceed"
]
},
{
"source": 12,
"target": 5,
"utterances": [
"Oh, I see"
]
},
{
"source": 5,
"target": 6,
"utterances": [
"I'd like to place a hold"
]
},
{
"source": 5,
"target": 7,
"utterances": [
"I'd like to see similar books"
]
},
{
"source": 6,
"target": 3,
"utterances": [
"Yes, I'd like to find another book"
]
},
{
"source": 6,
"target": 10,
"utterances": [
"No, that's all for today"
]
},
{
"source": 7,
"target": 8,
"utterances": [
"Yes, I'll take 'This Side of Paradise'"
]
},
{
"source": 7,
"target": 3,
"utterances": [
"No, I'd like to look for something else"
]
},
{
"source": 8,
"target": 9,
"utterances": [
"Yes, I'd like to check it out"
]
},
{
"source": 9,
"target": 3,
"utterances": [
"Yes, I'd like to find another book"
]
},
{
"source": 9,
"target": 10,
"utterances": [
"No, that's all for today"
]
}
]
},
"topic": "library"
}
]
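Graphs stored in this format can be rehydrated and re-sampled with the same pattern the notebook uses — a minimal sketch:

```python
import json

from chatsky_llm_autoconfig.graph import Graph

with open("graph_gen/graph_gen.json", encoding="utf-8") as f:
    entries = json.load(f)

graph = Graph(entries[0]["graph"])  # wraps the nodes/edges dict shown above
```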

Large diffs are not rendered by default.

9 changes: 9 additions & 0 deletions experiments/2024.12.10_complex_graph_gen/report.md
@@ -0,0 +1,9 @@
Idea: Iterative Graph Generation with Validation Feedback
Proposed approach:

1. Generate an initial graph using an LLM
2. Validate the generated graph using a triplet checker to identify errors
3. Feed both the generated graph and the identified errors back to the LLM
4. Repeat steps 2-3 iteratively until all errors are resolved

This creates a feedback loop where the LLM can learn from and correct mistakes in its graph generation based on concrete validation results. The triplet checker serves as an objective verification mechanism to ensure the generated graph meets all required constraints.
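A sketch of that loop, assuming `are_triplets_valid` returns a `{'value': bool, 'description': str}` dict as in the notebook above; `generate_graph` and the retry cap are hypothetical placeholders:

```python
def generate_with_feedback(topic, gen_model, valid_model, max_iters=5):
    errors = None
    for _ in range(max_iters):
        # Hypothetical helper: re-prompts the LLM, appending prior validation errors if any.
        graph = generate_graph(topic, gen_model, errors)
        result = are_triplets_valid(graph, valid_model)
        if result["value"]:
            return graph
        errors = result["description"]  # concrete validator feedback for the next attempt
    raise RuntimeError(f"No valid graph after {max_iters} iterations: {errors}")
```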
4 changes: 4 additions & 0 deletions experiments/2024.12.10_complex_graph_gen/task.md
@@ -0,0 +1,4 @@
Generate 10 more complex graphs at the complexity level specified in the prompt


If they turn out diverse and valid, generate up to 20 and send them to Yura to test his pipelines
