Commit 1281058

move log

omar-sol committed Jul 19, 2024
1 parent 069addf commit 1281058
Showing 2 changed files with 17 additions and 20 deletions.
scripts/custom_retriever.py (4 changes: 1 addition & 3 deletions)

@@ -26,13 +26,11 @@ def __init__(
     def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
         """Retrieve nodes given query."""
 
-        logger.info(f"Retrieving nodes for query: {query_bundle}")
-
         # LlamaIndex adds "\ninput is " to the query string
         query_bundle.query_str = query_bundle.query_str.replace("\ninput is ", "")
         query_bundle.query_str = query_bundle.query_str.rstrip()
 
-        logger.info(f"Query: {query_bundle.query_str}")
+        logger.info(f"Retrieving nodes for query: {query_bundle}")
 
         nodes = self._vector_retriever.retrieve(query_bundle)

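The net effect is that the retriever now emits a single log line, after the "\ninput is " suffix that LlamaIndex appends has been stripped, so the logged query matches what is actually retrieved against. A minimal usage sketch, assuming a custom_retriever instance built from the CustomRetriever class in scripts/custom_retriever.py (the query text is a placeholder):

    from llama_index.core import QueryBundle

    # Hypothetical usage; custom_retriever is assumed to be an instance of
    # the CustomRetriever defined in scripts/custom_retriever.py.
    bundle = QueryBundle(query_str="What is a vector index?\ninput is ")
    nodes = custom_retriever.retrieve(bundle)  # _retrieve strips the suffix, logs, then retrieves
    for node_with_score in nodes:
        print(node_with_score.score, node_with_score.node.get_content()[:80])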
scripts/gradio-ui.py (33 changes: 16 additions & 17 deletions)

@@ -1,4 +1,3 @@
-import json
 import logging
 import os
 import pickle
@@ -12,11 +11,12 @@
 from llama_index.agent.openai import OpenAIAgent
 from llama_index.core import VectorStoreIndex, get_response_synthesizer
 from llama_index.core.agent import AgentRunner, ReActAgent
-from llama_index.core.chat_engine import (
-    CondensePlusContextChatEngine,
-    CondenseQuestionChatEngine,
-    ContextChatEngine,
-)
+
+# from llama_index.core.chat_engine import (
+#     CondensePlusContextChatEngine,
+#     CondenseQuestionChatEngine,
+#     ContextChatEngine,
+# )
 from llama_index.core.data_structs import Node
 from llama_index.core.memory import ChatMemoryBuffer
 from llama_index.core.node_parser import SentenceSplitter
@@ -228,18 +228,17 @@ def generate_completion(
     else:
         llm = OpenAI(temperature=1, model=model, max_tokens=None)
 
-    response_synthesizer = get_response_synthesizer(
-        llm=llm,
-        response_mode="simple_summarize",
-        text_qa_template=TEXT_QA_TEMPLATE,
-        streaming=True,
-    )
+    # response_synthesizer = get_response_synthesizer(
+    #     llm=llm,
+    #     response_mode="simple_summarize",
+    #     text_qa_template=TEXT_QA_TEMPLATE,
+    #     streaming=True,
+    # )
 
     # completion = response_synthesizer.synthesize(query, nodes=nodes_context)
-    custom_query_engine = RetrieverQueryEngine(
-        retriever=custom_retriever,
-        response_synthesizer=response_synthesizer,
-    )
+    # custom_query_engine = RetrieverQueryEngine(
+    #     retriever=custom_retriever,
+    #     response_synthesizer=response_synthesizer,
+    # )
 
     # agent = CondensePlusContextChatEngine.from_defaults(
     # agent = CondenseQuestionChatEngine.from_defaults(
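For context, the now-disabled path wrapped the custom retriever in a RetrieverQueryEngine with a streaming response synthesizer. A minimal sketch of that wiring and how it would be queried, assuming custom_retriever, llm, TEXT_QA_TEMPLATE, and query are defined elsewhere in scripts/gradio-ui.py:

    from llama_index.core import get_response_synthesizer
    from llama_index.core.query_engine import RetrieverQueryEngine

    # Sketch of the commented-out path; custom_retriever, llm,
    # TEXT_QA_TEMPLATE, and query are assumed to exist in the script.
    response_synthesizer = get_response_synthesizer(
        llm=llm,
        response_mode="simple_summarize",
        text_qa_template=TEXT_QA_TEMPLATE,
        streaming=True,
    )
    custom_query_engine = RetrieverQueryEngine(
        retriever=custom_retriever,
        response_synthesizer=response_synthesizer,
    )
    streaming_response = custom_query_engine.query(query)
    for token in streaming_response.response_gen:  # stream tokens as they arrive
        print(token, end="")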
