Update chat example, prompt formats
turboderp committed Nov 30, 2024
1 parent 1f685bd commit 48e6306
Showing 2 changed files with 103 additions and 58 deletions.
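Before the diffs, a minimal, self-contained sketch (not part of the commit) of how the new -nsp flag is meant to flow into system_prompt. The flag names mirror the additions to examples/chat.py below; the trimmed parser and the hard-coded parse_args list are purely illustrative.

import argparse

# Trimmed-down mirror of the two relevant arguments from examples/chat.py.
parser = argparse.ArgumentParser()
parser.add_argument("-sp", "--system_prompt", type = str, help = "Use custom system prompt")
parser.add_argument("-nsp", "--no_system_prompt", action = "store_true", help = "Do not use any system prompt")

args = parser.parse_args(["-nsp"])   # as if the script had been launched with -nsp
system_prompt = args.system_prompt   # None unless -sp was given
if args.no_system_prompt:
    system_prompt = ""               # empty string means "no system block" downstream
print(repr(system_prompt))           # prints ''

An empty string, rather than None, is what the rest of the example treats as "no system prompt".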
12 changes: 8 additions & 4 deletions examples/chat.py
@@ -40,6 +40,7 @@
parser.add_argument("-un", "--username", type = str, default = "User", help = "Username when using raw chat mode")
parser.add_argument("-bn", "--botname", type = str, default = "Chatbort", help = "Bot name when using raw chat mode")
parser.add_argument("-sp", "--system_prompt", type = str, help = "Use custom system prompt")
parser.add_argument("-nsp", "--no_system_prompt", action = "store_true", help = "Do not use any system prompt")

parser.add_argument("-temp", "--temperature", type = float, default = 0.95, help = "Sampler temperature, default = 0.95 (1 to disable)")
parser.add_argument("-smooth", "--smoothing_factor", type = float, default = 0.0, help = "Smoothing Factor, default = 0.0 (0 to disable")
@@ -90,6 +91,8 @@
username = args.username
botname = args.botname
system_prompt = args.system_prompt
if args.no_system_prompt:
system_prompt = ""

if args.mode is None:
print(" ## Error: No mode specified.")
@@ -185,7 +188,7 @@ def format_prompt(user_prompt, first):
global system_prompt, prompt_format

if first:
return prompt_format.first_prompt() \
return prompt_format.first_prompt(bool(system_prompt)) \
.replace("<|system_prompt|>", system_prompt) \
.replace("<|user_prompt|>", user_prompt)
else:
@@ -288,9 +291,10 @@ def get_tokenized_context(max_len):
# Main loop

print(f" -- Prompt format: {args.mode}")
print(f" -- System prompt:")
print()
print(col_sysprompt + system_prompt.strip() + col_default)
if system_prompt:
print(f" -- System prompt:")
print()
print(col_sysprompt + system_prompt.strip() + col_default)

while True:

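To show how the two files are expected to fit together after this change, here is a hedged, runnable sketch: format_prompt mirrors the intent of the updated helper in chat.py (a boolean telling the format whether to emit a system block), and StubChatML is a hypothetical stand-in modeled on PromptFormat_chatml from chat_prompts.py.

# Illustration only; the class and function names here are not part of the commit.

class StubChatML:
    # Modeled on PromptFormat_chatml.first_prompt() as changed below.
    def first_prompt(self, sysprompt: bool) -> str:
        r = ""
        if sysprompt:
            r += "<|im_start|>system\n<|system_prompt|>\n<|im_end|>\n"
        r += "<|im_start|>user\n<|user_prompt|><|im_end|>\n<|im_start|>assistant\n"
        return r

def format_prompt(prompt_format, system_prompt, user_prompt):
    # The system block is requested only when a non-empty system prompt exists.
    return prompt_format.first_prompt(bool(system_prompt)) \
        .replace("<|system_prompt|>", system_prompt) \
        .replace("<|user_prompt|>", user_prompt)

fmt = StubChatML()
print(format_prompt(fmt, "You are a helpful AI assistant.", "Hello!"))
print(format_prompt(fmt, "", "Hello!"))  # what running chat.py with -nsp would produce

The first call prints a ChatML prompt with a system turn; the second starts directly at the user turn.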
149 changes: 95 additions & 54 deletions examples/chat_prompts.py
@@ -12,7 +12,7 @@ def __init__(self):
def default_system_prompt(self):
raise NotImplementedError

def first_prompt(self):
def first_prompt(self, sysprompt: bool):
raise NotImplementedError

def subs_prompt(self):
@@ -44,9 +44,11 @@ def default_system_prompt(self):
f"""This is a conversation between a helpful AI assistant named {self.botname} and a """ + \
(f"""user named {self.username}.""" if self.username != "User" else """user.""")

def first_prompt(self):
return \
f"""<|system_prompt|>\n{self.username}: <|user_prompt|>\n{self.botname}:"""
def first_prompt(self, sysprompt):
if sysprompt:
return f"""<|system_prompt|>\n{self.username}: <|user_prompt|>\n{self.botname}:"""
else:
return f"""{self.username}: <|user_prompt|>\n{self.botname}:"""

def subs_prompt(self):
return \
@@ -61,7 +63,7 @@ def stop_conditions(self, tokenizer):
tokenizer.eos_token_id]

def encoding_options(self):
return False, False, False
return True, False, False

def print_bot_name(self):
return True
@@ -81,9 +83,11 @@ def default_system_prompt(self):
"""Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. """ + \
"""Please ensure that your responses are socially unbiased and positive in nature."""

def first_prompt(self):
return \
"""[INST] <<SYS>>\n<|system_prompt|>\n<</SYS>>\n\n<|user_prompt|> [/INST]"""
def first_prompt(self, sysprompt):
if sysprompt:
return """[INST] <<SYS>>\n<|system_prompt|>\n<</SYS>>\n\n<|user_prompt|> [/INST]"""
else:
return """[INST] <|user_prompt|> [/INST]"""

def subs_prompt(self):
return \
@@ -115,19 +119,23 @@ def default_system_prompt(self):
"""to find the answer or suggest where to find it. Keep responses concise and relevant. Follow ethical """ + \
"""guidelines and promote a safe and respectful interaction."""

def first_prompt(self):
return \
"""<|start_header_id|>system<|end_header_id|>\n\n""" + \
"""<|system_prompt|><|eot_id|>""" + \
def first_prompt(self, sysprompt):
r = ""
if sysprompt:
r += \
"""<|start_header_id|>system<|end_header_id|>\n\n""" + \
"""<|system_prompt|><|eot_id|>"""
r += \
"""<|start_header_id|>user<|end_header_id|>\n\n""" + \
"""<|user_prompt|><|eot_id|>""" + \
"""<|start_header_id|>assistant<|end_header_id|>"""
"""<|start_header_id|>assistant<|end_header_id|>\n\n"""
return r

def subs_prompt(self):
return \
"""<|eot_id|><|start_header_id|>user<|end_header_id|>\n\n""" + \
"""<|user_prompt|><|eot_id|>""" + \
"""<|start_header_id|>assistant<|end_header_id|>"""
"""<|start_header_id|>assistant<|end_header_id|>\n\n"""

def stop_conditions(self, tokenizer):
return \
@@ -136,7 +144,7 @@ def stop_conditions(self, tokenizer):
tokenizer.single_id("<|start_header_id|>")]

def encoding_options(self):
return False, False, True
return True, False, True

def print_extra_newline(self):
return True
@@ -154,14 +162,18 @@ def default_system_prompt(self):
return \
"""You are a helpful AI assistant."""

def first_prompt(self):
return \
"""<s><|system|>\n""" + \
"""<|system_prompt|>""" + \
"""<|end|>\n""" + \
def first_prompt(self, sysprompt):
r = """<s>"""
if sysprompt:
r += \
"""<|system|>\n""" + \
"""<|system_prompt|>""" + \
"""<|end|>\n"""
r += \
"""<|user|>\n""" + \
"""<|user_prompt|><|end|>\n""" + \
"""<|assistant|>\n"""
return r

def subs_prompt(self):
return \
@@ -210,14 +222,18 @@ def default_system_prompt(self):
return \
f"""You are {self.botname}, a large language model. Answer as concisely as possible."""

def first_prompt(self):
return \
"""<|im_start|>system\n""" + \
"""<|system_prompt|>\n""" + \
"""<|im_end|>\n""" + \
def first_prompt(self, sysprompt):
r = ""
if sysprompt:
r += \
"""<|im_start|>system\n""" + \
"""<|system_prompt|>\n""" + \
"""<|im_end|>\n"""
r += \
"""<|im_start|>user\n""" + \
"""<|user_prompt|><|im_end|>\n""" + \
"""<|im_start|>assistant\n"""
return r

def subs_prompt(self):
return \
@@ -259,14 +275,18 @@ def default_system_prompt(self):
return \
f"""You are {self.botname}, a large language model. Answer as concisely as possible."""

def first_prompt(self):
return \
"""<|system|>\n""" + \
"""<|system_prompt|>\n""" + \
"""</s>\n""" + \
def first_prompt(self, sysprompt):
r = ""
if sysprompt:
r += \
"""<|system|>\n""" + \
"""<|system_prompt|>\n""" + \
"""</s>\n"""
r += \
"""<|user|>\n""" + \
"""<|user_prompt|></s>\n""" + \
"""<|assistant|>\n"""
return r

def subs_prompt(self):
return \
@@ -299,12 +319,15 @@ def default_system_prompt(self):
return \
f"""You are an AI programming assistant, utilizing the Deepseek Coder model, developed by Deepseek Company, and you only answer questions related to computer science. For politically sensitive questions, security and privacy issues, and other non-computer science questions, you will refuse to answer."""

def first_prompt(self):
return \
"""<|system_prompt|>\n""" + \
def first_prompt(self, sysprompt):
r = ""
if sysprompt:
r += """<|system_prompt|>\n"""
r += \
"""### Instruction:\n""" + \
"""<|user_prompt|>\n""" + \
"""### Response:\n"""
return r

def subs_prompt(self):
return \
@@ -335,13 +358,17 @@ def default_system_prompt(self):
return \
f"""You are an AI assistant."""

def first_prompt(self):
return \
"""### System\n""" + \
"""<|system_prompt|>\n\n""" + \
def first_prompt(self, sysprompt):
r = ""
if sysprompt:
r += \
"""### System\n""" + \
"""<|system_prompt|>\n\n"""
r += \
"""### User:\n""" + \
"""<|user_prompt|>\n\n""" + \
"""### Assistant:\n"""
return r

def subs_prompt(self):
return \
@@ -374,9 +401,13 @@ def default_system_prompt(self):
return \
f"""You are an AI assistant."""

def first_prompt(self):
return \
"""<|system_prompt|><|end_of_turn|>GPT4 Correct User:<|user_prompt|><|end_of_turn|>GPT4 Correct Assistant:"""
def first_prompt(self, sysprompt):
if sysprompt:
return \
"""<|system_prompt|><|end_of_turn|>GPT4 Correct User:<|user_prompt|><|end_of_turn|>GPT4 Correct Assistant:"""
else:
return \
"""GPT4 Correct User:<|user_prompt|><|end_of_turn|>GPT4 Correct Assistant:"""

def subs_prompt(self):
return \
@@ -408,12 +439,15 @@ def default_system_prompt(self):
return \
f"""Perform the task to the best of your ability."""

def first_prompt(self):
return \
"""<|system_prompt|>\n\n""" + \
def first_prompt(self, sysprompt):
r = ""
if sysprompt:
r += """<|system_prompt|>\n\n"""
r += \
"""USER:\n""" + \
"""<|user_prompt|>\n\n""" + \
"""ASSISTANT:\n"""
return r

def subs_prompt(self):
return \
@@ -444,7 +478,7 @@ def __init__(self):
def default_system_prompt(self):
return ""

def first_prompt(self):
def first_prompt(self, sysprompt):
return \
"""<bos><start_of_turn>user\n""" + \
"""<|user_prompt|><end_of_turn>\n""" + \
@@ -481,13 +515,17 @@ def __init__(self):
def default_system_prompt(self):
return "You are an AI coding assistant."

def first_prompt(self):
return \
"""System:\n""" + \
"""<|system_prompt|>\n\n""" + \
def first_prompt(self, sysprompt):
r = ""
if sysprompt:
r += \
"""System:\n""" + \
"""<|system_prompt|>\n\n"""
r += \
"""Question:\n""" + \
"""<|user_prompt|>\n\n""" + \
"""Answer:\n"""
return r

def subs_prompt(self):
return \
@@ -520,16 +558,19 @@ def default_system_prompt(self):
return \
f"""You are a helpful AI assistant."""

def first_prompt(self):
return \
"""<BOS_TOKEN>""" + \
"""<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>""" + \
"""<|system_prompt|>""" + \
"""<|END_OF_TURN_TOKEN|>""" + \
def first_prompt(self, sysprompt):
r = """<BOS_TOKEN>"""
if sysprompt:
r += \
"""<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>""" + \
"""<|system_prompt|>""" + \
"""<|END_OF_TURN_TOKEN|>"""
r += \
"""<|START_OF_TURN_TOKEN|><|USER_TOKEN|>""" + \
"""<|user_prompt|>""" + \
"""<|END_OF_TURN_TOKEN|>""" + \
"""<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>"""
return r

def subs_prompt(self):
return \
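For anyone adding a new prompt format against the updated interface, a minimal sketch of what first_prompt(sysprompt) and subs_prompt() now look like. The class name and the SYSTEM/USER/ASSISTANT markers below are illustrative, not a format shipped by this commit.

# Hypothetical format written against the updated first_prompt(sysprompt) signature.

class PromptFormat_demo:
    def default_system_prompt(self):
        return "You are a helpful AI assistant."

    def first_prompt(self, sysprompt: bool):
        # Emit the system block only when the caller actually has a system prompt.
        r = ""
        if sysprompt:
            r += "SYSTEM:\n<|system_prompt|>\n\n"
        r += "USER:\n<|user_prompt|>\n\nASSISTANT:\n"
        return r

    def subs_prompt(self):
        # Follow-up turns never carry a system block, so no flag is needed here.
        return "USER:\n<|user_prompt|>\n\nASSISTANT:\n"

fmt = PromptFormat_demo()
first = fmt.first_prompt(True) \
    .replace("<|system_prompt|>", fmt.default_system_prompt()) \
    .replace("<|user_prompt|>", "What does the -nsp flag do?")
later = fmt.subs_prompt().replace("<|user_prompt|>", "And how do formats react to it?")
print(first + "(model reply)\n\n" + later)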
