ACTIVE_PROMPT = {
"persona": """Other names of {{char}} are Liberchat or assistant.""",
"persona_name": "liberchat_bot",
}
COMMANDS = """/start - Start the bot
/clear - Clear the chat history for the bot
/help - Show this help
/joke - Tell a joke
/ask <question> - Ask a question
/info - Show info about the bot
/translate <text> - Translate text
/define <word> - Define a word
/summarize <text> - Summarize a text
/urban <word> - Get an urban dictionary definition"""
# Build a {command: description} mapping from the COMMANDS help text above.
# maxsplit=1 keeps descriptions intact even if they ever contain " - ".
COMMANDS_DICT = {
    command.split(" - ", 1)[0]: command.split(" - ", 1)[1]
    for command in COMMANDS.split("\n")
    if command.strip() != ""
}
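# e.g. COMMANDS_DICT["/start"] == "Start the bot" and
# COMMANDS_DICT["/ask <question>"] == "Ask a question"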
ACTIVE_MODEL = {
"max_length": 150,
"max_tries": 2,
"max_tokens": 16384,
"temperature": 0.7,
"sampler_order": [6, 0, 1, 3, 4, 2, 5],
"top_p": 0.9,
"top_k": 40,
"model_type": "knowledge",
"log_start": "",
"group_base_prompt": """<|im_start|>system
Telegram chat log of a room.
Room title: {{room_title}}
Room description: {{room_description}}
The AI assistant ("{{char}}"), meaning you, receives every message in the room but replies only when help is needed or when it is addressed directly.
You are "{{char}}", a decentralized LLM based on open-source models (fine-tuned from Mistral base models), running on libertai.io (an inference platform on top of the aleph.im decentralized cloud).
You are very smart and knowledgeable. You will answer concisely, in a cool tone, and avoid emojis. You will never mention that you are a large language model.
Available commands:\n""" + COMMANDS + "\n",
"private_base_prompt": """<|im_start|>system
Telegram chat with a user.
User's username: {{username}}
User's full name: {{first_name}} {{last_name}}
User's bio: {{bio}}
You are "{{char}}", a decentralized LLM based on open-source models (fine-tuned from Mistral base models), running on libertai.io (an inference platform on top of the aleph.im decentralized cloud).
You are very smart and knowledgeable. You will answer concisely, in a cool tone, and avoid emojis. You will never mention that you are a large language model.
Available commands:\n""" + COMMANDS + "\n",
"persona_start": "",
"scenario_start": "",
"user_prepend": "<|im_start|>",
"user_append": "\n",
"stop_sequences": ["<|", "<|im_end|>","<|endoftext|>", "<im_end|>", "</assistant", "</user"],
"line_separator": "<|im_end|>\n",
"name": "OpenHermes 2.5 (7B)",
"api_url": "https://curated.aleph.cloud/vm/a8b6d895cfe757d4bc5db9ba30675b5031fe3189a99a14f13d5210c473220caf/completion",
"engine": "llamacpp",
"pass_credentials": True,
"slot_id": None,
"low_message_water": 40,
"high_message_water": 80
}
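

# Minimal illustrative sketch of how the prompt templates above could be consumed.
# The helper name (render_group_prompt), the render-by-replacement approach, and
# the sample values are assumptions for illustration only; the bot code that
# actually fills the {{...}} placeholders lives elsewhere in the project.
def render_group_prompt(room_title: str, room_description: str) -> str:
    """Fill the {{...}} placeholders of the group base prompt with concrete values."""
    prompt = ACTIVE_MODEL["group_base_prompt"]
    replacements = {
        "{{char}}": ACTIVE_PROMPT["persona_name"],
        "{{room_title}}": room_title,
        "{{room_description}}": room_description,
    }
    for placeholder, value in replacements.items():
        prompt = prompt.replace(placeholder, value)
    return prompt


# Example (assumed values): render_group_prompt("LibertAI devs", "General discussion")
# returns the system prompt with the placeholders filled in, ending with the command list.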