forked from wxywb/history_rag
-
Notifications
You must be signed in to change notification settings - Fork 0
/
executor.py
374 lines (322 loc) · 15.5 KB
/
executor.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
import argparse
import logging
import sys
import re
import os
import argparse
import requests
from pathlib import Path
from urllib.parse import urlparse
from llama_index import ServiceContext, StorageContext
from llama_index import set_global_service_context
from llama_index import VectorStoreIndex, SimpleDirectoryReader, Document
from llama_index.llms import OpenAI
from llama_index.readers.file.flat_reader import FlatReader
from llama_index.vector_stores import MilvusVectorStore
from llama_index.embeddings import HuggingFaceEmbedding
from llama_index.node_parser.text import SentenceWindowNodeParser
from llama_index.prompts import ChatPromptTemplate, ChatMessage, MessageRole, PromptTemplate
from llama_index.postprocessor import MetadataReplacementPostProcessor
from llama_index.postprocessor import SentenceTransformerRerank
#from llama_index.indices import ZillizCloudPipelineIndex
from custom.zilliz.base import ZillizCloudPipelineIndex
from llama_index.indices.query.schema import QueryBundle
from llama_index.schema import BaseNode, ImageNode, MetadataMode
from custom.history_sentence_window import HistorySentenceWindowNodeParser
from custom.llms.QwenLLM import QwenUnofficial
from pymilvus import MilvusClient
# Prompt template for the question-answering step. Instructs the model to cite
# supporting passages in the form 出处:《书名》原文 and to answer "不知道" when
# the retrieved material cannot answer the question.
# Placeholders: {context_str} (retrieved passages), {query_str} (user question).
QA_PROMPT_TMPL_STR = (
    "请你仔细阅读相关内容,结合历史资料进行回答,每一条史资料使用'出处:《书名》原文内容'的形式标注 (如果回答请清晰无误地引用原文,先给出回答,再贴上对应的原文,使用《书名》[]对原文进行标识),,如果发现资料无法得到答案,就回答不知道 \n"
    "搜索的相关历史资料如下所示.\n"
    "---------------------\n"
    "{context_str}\n"
    "---------------------\n"
    "问题: {query_str}\n"
    "答案: "
)

# System prompt: frames the assistant as a rigorous history QA agent that backs
# every answer with quoted evidence inside 《书名》[].
QA_SYSTEM_PROMPT = "你是一个严谨的历史知识问答智能体,你会仔细阅读历史材料并给出准确的回答,你的回答都会非常准确,因为你在回答的之后,使用在《书名》[]内给出原文用来支撑你回答的证据.并且你会在开头说明原文是否有回答所需的知识"

# Refine-step template: only revises the existing answer when it was "不知道";
# otherwise it must pass the original answer through unchanged.
# Placeholders: {context_msg}, {query_str}, {existing_answer}.
REFINE_PROMPT_TMPL_STR = (
    "你是一个历史知识回答修正机器人,你严格按以下方式工作"
    "1.只有原答案为不知道时才进行修正,否则输出原答案的内容\n"
    "2.修正的时候为了体现你的精准和客观,你非常喜欢使用《书名》[]将原文展示出来.\n"
    "3.如果感到疑惑的时候,就用原答案的内容回答。"
    "新的知识: {context_msg}\n"
    "问题: {query_str}\n"
    "原答案: {existing_answer}\n"
    "新答案: "
)
def is_valid_url(url):
    """Return True when *url* parses with both a scheme and a network location."""
    try:
        parsed = urlparse(url)
    except ValueError:
        return False
    return bool(parsed.scheme) and bool(parsed.netloc)
def is_github_folder_url(url):
    """Heuristic: a raw.githubusercontent.com URL whose last path segment has no extension."""
    if not url.startswith('https://raw.githubusercontent.com/'):
        return False
    return '.' not in os.path.basename(url)
def get_github_repo_contents(repo_url):
    """Fetch *repo_url* and return the file names found in GitHub's tree payload.

    Returns an empty list (after printing a diagnostic) when the HTTP request
    does not succeed. NOTE(review): assumes the JSON body has the shape
    payload.tree.items[*].name — verify against the GitHub response format.
    """
    response = requests.get(repo_url)
    names = []
    if response.status_code != 200:
        print(f"Failed to fetch contents. Status code: {response.status_code}")
        return names
    tree_items = response.json()['payload']['tree']['items']
    names.extend(item['name'] for item in tree_items)
    return names
class Executor:
    """Abstract interface shared by the Milvus and Zilliz-pipeline executors.

    Subclasses implement index construction, query-engine setup, deletion and
    querying; every method here is a no-op placeholder returning None.
    """

    def __init__(self, model):
        """Placeholder constructor; subclasses configure their own backends."""

    def build_index(self, path, overwrite):
        """Ingest documents from *path*; *overwrite* recreates the collection."""

    def build_query_engine(self):
        """Prepare the retrieval/synthesis pipeline used by query()."""

    def delete_file(self, path):
        """Remove previously indexed entries originating from *path*."""

    def query(self, question):
        """Answer *question* against the built index."""
class MilvusExecutor(Executor):
    """RAG executor backed by a self-hosted Milvus vector store.

    Builds a sentence-window index over plain-text history documents and
    answers questions through a rerank + metadata-replacement query pipeline.
    """

    def __init__(self, config):
        self.index = None
        self.query_engine = None
        self.config = config
        # Split on Chinese/Western sentence punctuation so each node is one clause.
        self.node_parser = HistorySentenceWindowNodeParser.from_defaults(
            sentence_splitter=lambda text: re.findall("[^,.;。?!]+[,.;。?!]?", text),
            window_size=config.milvus.window_size,
            window_metadata_key="window",
            original_text_metadata_key="original_text",)

        embed_model = HuggingFaceEmbedding(model_name=config.embedding.name)
        # Use the Qwen (通义千问) model when configured; otherwise fall back to OpenAI.
        if config.llm.name == "qwen":
            llm = QwenUnofficial(temperature=config.llm.temperature, model=config.llm.name, max_tokens=2048)
        else:
            api_base = None
            if 'api_base' in config.llm:
                api_base = config.llm.api_base
            llm = OpenAI(api_base=api_base, temperature=config.llm.temperature, model=config.llm.name, max_tokens=2048)

        service_context = ServiceContext.from_defaults(llm=llm, embed_model=embed_model)
        set_global_service_context(service_context)

        rerank_k = config.milvus.rerank_topk
        self.rerank_postprocessor = SentenceTransformerRerank(
            model=config.rerank.name, top_n=rerank_k)
        self._milvus_client = None
        self._debug = False

    def set_debug(self, mode):
        """Enable/disable printing of retrieved contexts inside query()."""
        self._debug = mode

    def build_index(self, path, overwrite):
        """Index a .txt file or a directory of documents into Milvus.

        Args:
            path: path to a .txt file or to a directory of documents.
            overwrite: when True, drop and recreate the collection.
        """
        config = self.config
        vector_store = MilvusVectorStore(
            host=config.milvus.host,
            port=config.milvus.port,
            collection_name=config.milvus.collection_name,
            overwrite=overwrite,
            dim=config.embedding.dim)
        self._milvus_client = vector_store.milvusclient
        if path.endswith('.txt'):
            if not os.path.exists(path):
                print(f'(rag) 没有找到文件{path}')
                return
            documents = FlatReader().load_data(Path(path))
            # Downstream deletion filters on 'file_name'; FlatReader sets 'filename'.
            documents[0].metadata['file_name'] = documents[0].metadata['filename']
        elif os.path.isfile(path):
            print('(rag) 目前仅支持txt文件')
            # BUG FIX: previously fell through and raised NameError on `documents`.
            return
        elif os.path.isdir(path):
            # os.path.isdir already implies existence; no separate exists() check needed.
            documents = SimpleDirectoryReader(path).load_data()
        else:
            return

        storage_context = StorageContext.from_defaults(vector_store=vector_store)
        nodes = self.node_parser.get_nodes_from_documents(documents)
        self.index = VectorStoreIndex(nodes, storage_context=storage_context, show_progress=True)

    def _get_index(self):
        """Attach to the existing Milvus collection without re-indexing."""
        config = self.config
        vector_store = MilvusVectorStore(
            host=config.milvus.host,
            port=config.milvus.port,
            collection_name=config.milvus.collection_name,
            dim=config.embedding.dim)
        self.index = VectorStoreIndex.from_vector_store(vector_store=vector_store)
        self._milvus_client = vector_store.milvusclient

    def build_query_engine(self):
        """Build the query engine: retrieve -> rerank -> window replacement -> synthesize."""
        config = self.config
        if self.index is None:
            self._get_index()
        self.query_engine = self.index.as_query_engine(node_postprocessors=[
            self.rerank_postprocessor,
            MetadataReplacementPostProcessor(target_metadata_key="window")
        ])
        self.query_engine._retriever.similarity_top_k = config.milvus.retrieve_topk

        message_templates = [
            ChatMessage(content=QA_SYSTEM_PROMPT, role=MessageRole.SYSTEM),
            ChatMessage(
                content=QA_PROMPT_TMPL_STR,
                role=MessageRole.USER,
            ),
        ]
        chat_template = ChatPromptTemplate(message_templates=message_templates)
        self.query_engine.update_prompts(
            {"response_synthesizer:text_qa_template": chat_template}
        )
        # No public API to swap the refine template's message content; patch in place.
        self.query_engine._response_synthesizer._refine_template.conditionals[0][1].message_templates[0].content = REFINE_PROMPT_TMPL_STR

    def delete_file(self, path):
        """Delete all rows whose file_name equals *path* and report the counts."""
        config = self.config
        if self._milvus_client is None:
            self._get_index()
        # BUG FIX: the collection name was hard-coded to 'history_rag';
        # use the configured collection so deletion targets the indexed data.
        collection = config.milvus.collection_name
        num_entities_prev = self._milvus_client.query(collection_name=collection, filter="", output_fields=["count(*)"])[0]["count(*)"]
        self._milvus_client.delete(collection_name=collection, filter=f"file_name=='{path}'")
        num_entities = self._milvus_client.query(collection_name=collection, filter="", output_fields=["count(*)"])[0]["count(*)"]
        print(f'(rag) 现有{num_entities}条,删除{num_entities_prev - num_entities}条数据')

    def query(self, question):
        """Answer *question*; in debug mode, print each retrieved context first."""
        if self.index is None:
            self._get_index()
        # Strip a trailing question mark (full- or half-width) to aid retrieval.
        if question.endswith('?') or question.endswith('?'):
            question = question[:-1]
        if self._debug:
            contexts = self.query_engine.retrieve(QueryBundle(question))
            for i, context in enumerate(contexts):
                print(f'{question}', i)
                content = context.node.get_content(metadata_mode=MetadataMode.LLM)
                print(content)
                print('-------------------------------------------------------参考资料---------------------------------------------------------')
        response = self.query_engine.query(question)
        return response
class PipelineExecutor(Executor):
    """RAG executor backed by Zilliz Cloud Pipelines.

    Documents are ingested from raw.githubusercontent.com URLs; storage and
    retrieval are delegated to a managed Zilliz pipeline index.
    """

    def __init__(self, config):
        self.ZILLIZ_CLUSTER_ID = os.getenv("ZILLIZ_CLUSTER_ID")
        self.ZILLIZ_TOKEN = os.getenv("ZILLIZ_TOKEN")
        self.ZILLIZ_PROJECT_ID = os.getenv("ZILLIZ_PROJECT_ID")
        self.ZILLIZ_CLUSTER_ENDPOINT = f"https://{self.ZILLIZ_CLUSTER_ID}.api.gcp-us-west1.zillizcloud.com"

        self.config = config
        # BUG FIX: os.getenv returns None for unset variables, so the previous
        # len(...) checks raised TypeError instead of printing a helpful message.
        if not self.ZILLIZ_CLUSTER_ID:
            print('ZILLIZ_CLUSTER_ID 参数为空')
            exit()

        if not self.ZILLIZ_TOKEN:
            print('ZILLIZ_TOKEN 参数为空')
            exit()

        self._initialize_pipeline()
        self._debug = False

        llm = OpenAI(temperature=config.llm.temperature, model=config.llm.name)
        # Embeddings are computed by the managed pipeline, hence embed_model=None.
        service_context = ServiceContext.from_defaults(llm=llm, embed_model=None)
        set_global_service_context(service_context)

    def set_debug(self, mode):
        """Enable/disable printing of retrieved contexts inside query()."""
        self._debug = mode

    def _initialize_pipeline(self):
        """Connect to the Zilliz pipeline index (creating pipelines on first use)
        and open a MilvusClient against the cluster endpoint."""
        config = self.config
        try:
            self.index = ZillizCloudPipelineIndex(
                project_id=self.ZILLIZ_PROJECT_ID,
                cluster_id=self.ZILLIZ_CLUSTER_ID,
                token=self.ZILLIZ_TOKEN,
                collection_name=config.pipeline.collection_name,
            )
            if len(self._list_pipeline_ids()) == 0:
                self.index.create_pipelines(
                    metadata_schema={"digest_from": "VarChar"}, chunk_size=self.config.pipeline.chunk_size
                )
        except Exception as e:
            print('(rag) zilliz pipeline 连接异常', str(e))
            exit()
        try:
            self._milvus_client = MilvusClient(
                uri=self.ZILLIZ_CLUSTER_ENDPOINT,
                token=self.ZILLIZ_TOKEN
            )
        except Exception as e:
            print('(rag) zilliz cloud 连接异常', str(e))

    def build_index(self, path, overwrite):
        """Ingest a GitHub raw .txt URL, or every .txt file in a GitHub folder URL.

        Args:
            path: raw.githubusercontent.com URL of a .txt file or a folder.
            overwrite: when True, drop the collection and recreate the pipelines.
        """
        config = self.config
        if not is_valid_url(path) or 'github' not in path:
            print('(rag) 不是一个合法的url,请尝试`https://raw.githubusercontent.com/wxywb/history_rag/master/data/history_24/baihuasanguozhi.txt`')
            return
        if overwrite:
            self._milvus_client.drop_collection(config.pipeline.collection_name)
            pipeline_ids = self._list_pipeline_ids()
            self._delete_pipeline_ids(pipeline_ids)
            self._initialize_pipeline()
        if is_github_folder_url(path):
            filenames = get_github_repo_contents(path)
            for filename in filenames:
                if filename.endswith('.txt'):
                    # BUG FIX: previously called self.build_index(self, ...) —
                    # passing the instance as the path and omitting `overwrite`.
                    # Recurse per file with overwrite=False so the collection is
                    # not dropped again mid-ingestion.
                    self.build_index(path + f'/{filename}', False)
        elif path.endswith('.txt'):
            self.index.insert_doc_url(
                url=path,
                metadata={"digest_from": HistorySentenceWindowNodeParser.book_name(os.path.basename(path))},
            )
        else:
            print('(rag) 只有github上以txt结尾或文件夹可以被支持。')

    def build_query_engine(self):
        """Build the pipeline-backed query engine and install the QA prompts."""
        config = self.config
        self.query_engine = self.index.as_query_engine(
            search_top_k=config.pipeline.retrieve_topk)

        message_templates = [
            ChatMessage(content=QA_SYSTEM_PROMPT, role=MessageRole.SYSTEM),
            ChatMessage(
                content=QA_PROMPT_TMPL_STR,
                role=MessageRole.USER,
            ),
        ]
        chat_template = ChatPromptTemplate(message_templates=message_templates)
        self.query_engine.update_prompts(
            {"response_synthesizer:text_qa_template": chat_template}
        )
        # No public API to swap the refine template's message content; patch in place.
        self.query_engine._response_synthesizer._refine_template.conditionals[0][1].message_templates[0].content = REFINE_PROMPT_TMPL_STR

    def delete_file(self, path):
        """Delete all rows whose doc_name equals *path* and report the counts."""
        config = self.config
        if self._milvus_client is None:
            # BUG FIX: this class has no _get_index(); reconnect via the pipeline.
            self._initialize_pipeline()
        # BUG FIX: the collection name was hard-coded to 'history_rag' and the
        # delete used config.milvus.*; this executor is configured via
        # config.pipeline.* (see _initialize_pipeline) — NOTE(review): confirm
        # both config sections name the same collection in deployed configs.
        collection = config.pipeline.collection_name
        num_entities_prev = self._milvus_client.query(collection_name=collection, filter="", output_fields=["count(*)"])[0]["count(*)"]
        self._milvus_client.delete(collection_name=collection, filter=f"doc_name=='{path}'")
        num_entities = self._milvus_client.query(collection_name=collection, filter="", output_fields=["count(*)"])[0]["count(*)"]
        print(f'(rag) 现有{num_entities}条,删除{num_entities_prev - num_entities}条数据')

    def query(self, question):
        """Answer *question*; in debug mode, print each retrieved context first."""
        if self.index is None:
            # BUG FIX: self.get_index() did not exist; rebuild the pipeline index.
            self._initialize_pipeline()
        # Strip a trailing question mark (full- or half-width) to aid retrieval.
        if question.endswith("?") or question.endswith("?"):
            question = question[:-1]
        if self._debug:
            contexts = self.query_engine.retrieve(QueryBundle(question))
            for i, context in enumerate(contexts):
                print(f'{question}', i)
                content = context.node.get_content(metadata_mode=MetadataMode.LLM)
                print(content)
                print('-------------------------------------------------------参考资料---------------------------------------------------------')
        response = self.query_engine.query(question)
        return response

    def _list_pipeline_ids(self):
        """Return the ids of pipelines whose name contains this collection's name.

        Raises:
            RuntimeError: on a non-200 HTTP status or a non-200 API code.
        """
        url = f"https://controller.api.gcp-us-west1.zillizcloud.com/v1/pipelines?projectId={self.ZILLIZ_PROJECT_ID}"
        headers = {
            "Authorization": f"Bearer {self.ZILLIZ_TOKEN}",
            "Accept": "application/json",
            "Content-Type": "application/json",
        }
        # BUG FIX: filtered on config.milvus.collection_name while the pipelines
        # are created with config.pipeline.collection_name (copy-paste from the
        # Milvus executor) — NOTE(review): confirm against deployed configs.
        collection_name = self.config.pipeline.collection_name
        response = requests.get(url, headers=headers)
        if response.status_code != 200:
            raise RuntimeError(response.text)
        response_dict = response.json()
        if response_dict["code"] != 200:
            raise RuntimeError(response_dict)
        pipeline_ids = []
        for pipeline in response_dict['data']:
            if collection_name in pipeline['name']:
                pipeline_ids.append(pipeline['pipelineId'])
        return pipeline_ids

    def _delete_pipeline_ids(self, pipeline_ids):
        """Delete each pipeline id via the Zilliz controller REST API.

        Raises:
            RuntimeError: on any non-200 HTTP status.
        """
        for pipeline_id in pipeline_ids:
            url = f"https://controller.api.gcp-us-west1.zillizcloud.com/v1/pipelines/{pipeline_id}/"
            headers = {
                "Authorization": f"Bearer {self.ZILLIZ_TOKEN}",
                "Accept": "application/json",
                "Content-Type": "application/json",
            }
            response = requests.delete(url, headers=headers)
            if response.status_code != 200:
                raise RuntimeError(response.text)