feat(llama.cpp): First pass at full port of granite deviations from llama

Something is still not working right since the results are mostly terrible,
but on occasion it's producing relevant results at this point, so
_something_ is working.

Branch: GraniteLM

Signed-off-by: Gabe Goodhart <[email protected]>
gabe-l-hart committed Sep 16, 2024
1 parent 383065a commit ec13f29
Showing 1 changed file with 42 additions and 1 deletion.
43 changes: 42 additions & 1 deletion src/llama.cpp
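A minimal sketch of the four Granite-specific scaling hyperparameters the diff below wires into the llama code path. The field names come from the diff itself; the struct name is illustrative, and the zero-means-unset reading is inferred from the checks the diff performs.

// Sketch only: the four Granite scaling knobs this change consults.
// A value of 0.0f is treated as "not set", in which case the graph
// falls back to stock llama behaviour.
struct granite_scaling_sketch {
    float f_embedding_multiplier = 0.0f; // scales token embeddings after lookup
    float f_attention_multiplier = 0.0f; // replaces 1.0f/sqrtf(n_embd_head) as kq_scale
    float f_residual_multiplier  = 0.0f; // scales each branch output before the residual add
    float f_logit_scale          = 0.0f; // final logits are divided by this
};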
@@ -1462,6 +1462,22 @@ static const std::map<llm_arch, std::map<llm_tensor, std::string>> LLM_TENSOR_NA
{ LLM_TENSOR_CHANNEL_MIX_RECEPTANCE, "blk.%d.channel_mix_receptance" },
},
},
{
LLM_ARCH_GRANITE,
{
{ LLM_TENSOR_TOKEN_EMBD, "token_embd" },
{ LLM_TENSOR_OUTPUT_NORM, "output_norm" },
{ LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm" },
{ LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
{ LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
{ LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
{ LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
{ LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm" },
{ LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
{ LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
{ LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
},
},
{
LLM_ARCH_UNKNOWN,
{
@@ -6915,6 +6931,7 @@ static bool llm_load_tensors(
case LLM_ARCH_LLAMA:
case LLM_ARCH_REFACT:
case LLM_ARCH_MINICPM:
case LLM_ARCH_GRANITE:
{
model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});

@@ -8898,6 +8915,11 @@ static struct ggml_tensor * llm_build_inp_embd(
ggml_set_input(lctx.inp_embd);
}

// For Granite architecture
if (hparams.f_embedding_multiplier != 0.0f) {
inpL = ggml_scale(ctx, inpL, hparams.f_embedding_multiplier);
}

cb(inpL, "inp_embd", -1);

return inpL;
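The added block above scales the token embeddings right after lookup whenever f_embedding_multiplier is set. The same arithmetic as a standalone sketch in plain C++ (not ggml; function and variable names are illustrative):

#include <vector>

// Sketch: apply Granite's embedding multiplier to a flat embedding buffer.
void scale_embeddings(std::vector<float> & inp_embd, float f_embedding_multiplier) {
    if (f_embedding_multiplier != 0.0f) {
        for (float & x : inp_embd) {
            x *= f_embedding_multiplier;
        }
    }
}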
@@ -10176,6 +10198,7 @@ struct llm_build_context {
// KQ_mask (mask for 1 head, it will be broadcasted to all heads)
struct ggml_tensor * KQ_mask = build_inp_KQ_mask();

const float kq_scale = hparams.f_attention_multiplier == 0.0f ? 1.0f/sqrtf(float(n_embd_head)) : hparams.f_attention_multiplier;
for (int il = 0; il < n_layer; ++il) {
struct ggml_tensor * inpSA = inpL;

@@ -10228,7 +10251,7 @@ struct llm_build_context {

cur = llm_build_kv(ctx0, lctx, kv_self, gf,
model.layers[il].wo, model.layers[il].bo,
- Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
+ Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, kq_scale, cb, il);
}

if (il == n_layer - 1) {
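The two hunks above replace the hard-coded 1.0f/sqrtf(float(n_embd_head)) attention scale with a per-model value: if f_attention_multiplier is left at 0.0f the stock llama scaling is used, otherwise the Granite value is passed to llm_build_kv as kq_scale. The same selection written as a standalone helper (a sketch; the function name is illustrative):

#include <cmath>

// Sketch: choose the KQ scale the way the diff does.
float pick_kq_scale(float f_attention_multiplier, int n_embd_head) {
    return f_attention_multiplier == 0.0f
        ? 1.0f / std::sqrt((float) n_embd_head)
        : f_attention_multiplier;
}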
@@ -10239,6 +10262,11 @@ struct llm_build_context {
inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
}

// For Granite architecture
if (hparams.f_residual_multiplier) {
cur = ggml_scale(ctx0, cur, hparams.f_residual_multiplier);
}

struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
cb(ffn_inp, "ffn_inp", il);

@@ -10275,6 +10303,11 @@ struct llm_build_context {
cb(cur, "ffn_moe_out", il);
}

// For Granite architecture
if (hparams.f_residual_multiplier) {
cur = ggml_scale(ctx0, cur, hparams.f_residual_multiplier);
}

cur = ggml_add(ctx0, cur, ffn_inp);
cb(cur, "ffn_out", il);
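Both added blocks apply the same pattern: the output of the attention branch and of the FFN branch is multiplied by f_residual_multiplier before it is added back onto the residual stream, i.e. x_out = x_in + f_residual_multiplier * branch(x_in) rather than the plain llama x_out = x_in + branch(x_in). A minimal sketch in plain C++ (illustrative names; the fallback to 1.0f mirrors the zero-means-unset checks above):

#include <cstddef>
#include <vector>

// Sketch: Granite-style residual add, x_out = x_in + alpha * branch(x_in).
void residual_add(std::vector<float> & x, const std::vector<float> & branch_out,
                  float f_residual_multiplier) {
    const float alpha = f_residual_multiplier != 0.0f ? f_residual_multiplier : 1.0f;
    for (std::size_t i = 0; i < x.size(); ++i) {
        x[i] += alpha * branch_out[i];
    }
}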

@@ -10294,6 +10327,12 @@ struct llm_build_context {

// lm_head
cur = llm_build_lora_mm(lctx, ctx0, model.output, cur);

// For Granite architecture
if (hparams.f_logit_scale) {
cur = ggml_scale(ctx0, cur, 1.0f / hparams.f_logit_scale);
}

cb(cur, "result_output", -1);

ggml_build_forward_expand(gf, cur);
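Unlike the other multipliers, f_logit_scale is applied as a division: the lm_head output is scaled by 1.0f / f_logit_scale. The same step as a standalone sketch (plain C++, illustrative names):

#include <vector>

// Sketch: Granite divides the final logits by f_logit_scale when it is set.
void scale_logits(std::vector<float> & logits, float f_logit_scale) {
    if (f_logit_scale != 0.0f) {
        const float inv = 1.0f / f_logit_scale;
        for (float & x : logits) {
            x *= inv;
        }
    }
}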
@@ -15819,6 +15858,7 @@ static struct ggml_cgraph * llama_build_graph(

switch (model.arch) {
case LLM_ARCH_LLAMA:
case LLM_ARCH_GRANITE:
{
result = llm.build_llama();
} break;
@@ -19115,6 +19155,7 @@ enum llama_rope_type llama_rope_type(const struct llama_model * model) {
case LLM_ARCH_ARCTIC:
case LLM_ARCH_DEEPSEEK2:
case LLM_ARCH_CHATGLM:
case LLM_ARCH_GRANITE:
return LLAMA_ROPE_TYPE_NORM;

// the pairs of head values are offset by n_rot/2
