diff --git a/llama.cpp/llama.cpp b/llama.cpp/llama.cpp
index 16defdd3c4..60a6a3ec1b 100644
--- a/llama.cpp/llama.cpp
+++ b/llama.cpp/llama.cpp
@@ -19291,6 +19291,19 @@ static int32_t llama_chat_apply_template_internal(
         if (add_ass) {
             ss << "[|assistant|]";
         }
+    } else if (tmpl == "granite" || tmpl == "granitemoe" || tmpl_contains("<|start_of_role|>")) {
+        // IBM Granite template
+        for (const auto & message : chat) {
+            std::string role(message->role);
+            ss << "<|start_of_role|>" << role << "<|end_of_role|>";
+            if (role == "assistant_tool_call") {
+                ss << "<|tool_call|>";
+            }
+            ss << message->content << "<|end_of_text|>\n";
+        }
+        if (add_ass) {
+            ss << "<|start_of_role|>assistant<|end_of_role|>\n";
+        }
     } else {
         // template not supported
         return -1;
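
For reference, a standalone sketch of what this branch renders for a short conversation. It mirrors the added formatting logic with a hypothetical `Msg` struct rather than the real `llama_chat_message` array, so it does not call the patched API:

```cpp
// Minimal sketch of the Granite prompt formatting added above.
// Msg is a hypothetical stand-in for llama_chat_message.
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

struct Msg { std::string role, content; };

int main() {
    std::vector<Msg> chat = {
        {"system", "You are a helpful assistant."},
        {"user",   "Hello!"},
    };
    std::ostringstream ss;
    for (const auto & m : chat) {
        ss << "<|start_of_role|>" << m.role << "<|end_of_role|>";
        if (m.role == "assistant_tool_call") {
            ss << "<|tool_call|>";  // tool-call marker goes right before the content
        }
        ss << m.content << "<|end_of_text|>\n";
    }
    // add_ass == true: open an assistant turn for generation
    ss << "<|start_of_role|>assistant<|end_of_role|>\n";
    std::cout << ss.str();
    // Expected output:
    // <|start_of_role|>system<|end_of_role|>You are a helpful assistant.<|end_of_text|>
    // <|start_of_role|>user<|end_of_role|>Hello!<|end_of_text|>
    // <|start_of_role|>assistant<|end_of_role|>
}
```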