Commit 9ae5819

Add chat format test.

abetlen committed Jan 29, 2024
1 parent ce38dbd commit 9ae5819
Showing 2 changed files with 35 additions and 10 deletions.
22 changes: 12 additions & 10 deletions llama_cpp/llama_chat_format.py
@@ -878,19 +878,21 @@ def format_chatml(
 
 
 @register_chat_format("mistral-instruct")
-def format_mistral(
+def format_mistral_instruct(
     messages: List[llama_types.ChatCompletionRequestMessage],
     **kwargs: Any,
 ) -> ChatFormatterResponse:
-    _roles = dict(user="[INST] ", assistant="[/INST]")
-    _sep = " "
-    system_template = """<s>{system_message}"""
-    system_message = _get_system_message(messages)
-    system_message = system_template.format(system_message=system_message)
-    _messages = _map_roles(messages, _roles)
-    _messages.append((_roles["assistant"], None))
-    _prompt = _format_no_colon_single(system_message, _messages, _sep)
-    return ChatFormatterResponse(prompt=_prompt)
+    bos = "<s>"
+    eos = "</s>"
+    stop = eos
+    prompt = bos
+    for message in messages:
+        if message["role"] == "user" and message["content"] is not None and isinstance(message["content"], str):
+            prompt += "[INST] " + message["content"]
+        elif message["role"] == "assistant" and message["content"] is not None and isinstance(message["content"], str):
+            prompt += " [/INST]" + message["content"] + eos
+    prompt += " [/INST]"
+    return ChatFormatterResponse(prompt=prompt, stop=stop)
 
 
 @register_chat_format("chatglm3")
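
For reference, the rewritten formatter builds the canonical Mistral Instruct layout directly instead of going through the generic role-mapping helpers. A minimal sketch of calling it by hand, using the same messages as the test added below; the commented output is the string the loop above produces, traced from the code:

    import llama_cpp.llama_types as llama_types
    import llama_cpp.llama_chat_format as llama_chat_format

    messages = [
        llama_types.ChatCompletionRequestUserMessage(role="user", content="Instruction"),
        llama_types.ChatCompletionRequestAssistantMessage(role="assistant", content="Model answer"),
        llama_types.ChatCompletionRequestUserMessage(role="user", content="Follow-up instruction"),
    ]
    response = llama_chat_format.format_mistral_instruct(messages=messages)
    print(response.prompt)
    # <s>[INST] Instruction [/INST]Model answer</s>[INST] Follow-up instruction [/INST]
    print(response.stop)
    # </s>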
23 changes: 23 additions & 0 deletions tests/test_llama_chat_format.py
@@ -1,10 +1,33 @@
 import json
 
+import jinja2
+
 from llama_cpp import (
     ChatCompletionRequestUserMessage,
 )
+import llama_cpp.llama_types as llama_types
+import llama_cpp.llama_chat_format as llama_chat_format
 
 from llama_cpp.llama_chat_format import hf_tokenizer_config_to_chat_formatter
 
+def test_mistral_instruct():
+    chat_template = "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token}}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}"
+    chat_formatter = jinja2.Template(chat_template)
+    messages = [
+        llama_types.ChatCompletionRequestUserMessage(role="user", content="Instruction"),
+        llama_types.ChatCompletionRequestAssistantMessage(role="assistant", content="Model answer"),
+        llama_types.ChatCompletionRequestUserMessage(role="user", content="Follow-up instruction"),
+    ]
+    response = llama_chat_format.format_mistral_instruct(
+        messages=messages,
+    )
+    reference = chat_formatter.render(
+        messages=messages,
+        bos_token="<s>",
+        eos_token="</s>",
+    )
+    assert response.prompt == reference
+
+
 mistral_7b_tokenizer_config = """{
     "add_bos_token": true,
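
Because the function is registered under "mistral-instruct", the same formatting is also applied end to end through the high-level API. A minimal sketch, assuming llama-cpp-python is installed and a local Mistral GGUF file exists at a hypothetical path:

    from llama_cpp import Llama

    # chat_format="mistral-instruct" routes messages through format_mistral_instruct
    llm = Llama(
        model_path="./mistral-7b-instruct.Q4_K_M.gguf",  # hypothetical local model file
        chat_format="mistral-instruct",
    )
    result = llm.create_chat_completion(
        messages=[{"role": "user", "content": "Instruction"}]
    )
    print(result["choices"][0]["message"]["content"])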
