From 8e092616a9c172299eaf6b057802bc6646883997 Mon Sep 17 00:00:00 2001
From: jukofyork <69222624+jukofyork@users.noreply.github.com>
Date: Sat, 22 Jun 2024 10:39:26 +0100
Subject: [PATCH 1/5] fixes #7999

The `build_command_r` forgot to add the control vector.
---
 llama.cpp | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/llama.cpp b/llama.cpp
index a05a52b4234cd..bb87ec058cdc9 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -11047,6 +11047,13 @@ struct llm_build_context {
             // add together residual + FFN + self-attention
             cur = ggml_add(ctx0, cur, inpL);
             cur = ggml_add(ctx0, cur, attn_out);
+
+            // add control vector
+            ggml_tensor * layer_dir = lctx.cvec.tensor_for(il);
+            if (layer_dir != nullptr) {
+                cur = ggml_add(ctx0, cur, layer_dir);
+            }
+
             cb(cur, "l_out", il);

             // input for next layer

From af2e0330518382aaef8ecbe186dd0840c91a6b5a Mon Sep 17 00:00:00 2001
From: jukofyork <69222624+jukofyork@users.noreply.github.com>
Date: Sat, 22 Jun 2024 11:07:17 +0100
Subject: [PATCH 2/5] Fixes qwen2 too

---
 llama.cpp | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/llama.cpp b/llama.cpp
index bb87ec058cdc9..587134926a6cf 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -9442,6 +9442,13 @@ struct llm_build_context {
             cb(cur, "ffn_out", il);

             cur = ggml_add(ctx0, cur, ffn_inp);
+
+            // add control vector
+            ggml_tensor * layer_dir = lctx.cvec.tensor_for(il);
+            if (layer_dir != nullptr) {
+                cur = ggml_add(ctx0, cur, layer_dir);
+            }
+
             cb(cur, "l_out", il);

             // input for next layer

From f57ebf281eca7fc3d87439072ea8355514156b5d Mon Sep 17 00:00:00 2001
From: jukofyork <69222624+jukofyork@users.noreply.github.com>
Date: Sat, 22 Jun 2024 17:10:08 +0100
Subject: [PATCH 3/5] Fixed all models' control vectors

---
 llama.cpp | 143 ++++++++++++++++++++++++++++++++++++++++++------------
 1 file changed, 111 insertions(+), 32 deletions(-)

diff --git a/llama.cpp b/llama.cpp
index 587134926a6cf..5edce026a7a00 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -7818,8 +7818,7 @@ struct llm_build_context {
             cur = ggml_add(ctx0, cur, ffn_inp);
             cb(cur, "ffn_out", il);

-            ggml_tensor * layer_dir = lctx.cvec.tensor_for(il);
-            if (layer_dir != nullptr) {
+            if (ggml_tensor * layer_dir = lctx.cvec.tensor_for(il)) {
                 cur = ggml_add(ctx0, cur, layer_dir);
             }
             cb(cur, "l_out", il);
@@ -7936,6 +7935,9 @@ struct llm_build_context {
             }

             cur = ggml_add(ctx0, cur, ffn_inp);
+            if (ggml_tensor * layer_dir = lctx.cvec.tensor_for(il)) {
+                cur = ggml_add(ctx0, cur, layer_dir);
+            }
             cb(cur, "l_out", il);

             // input for next layer
@@ -8040,6 +8042,9 @@ struct llm_build_context {
             }

             cur = ggml_add(ctx0, cur, ffn_inp);
+            if (ggml_tensor * layer_dir = lctx.cvec.tensor_for(il)) {
+                cur = ggml_add(ctx0, cur, layer_dir);
+            }
             cb(cur, "l_out", il);

             // input for next layer
@@ -8158,6 +8163,9 @@ struct llm_build_context {
             cb(cur, "l_out", il);

             cur = ggml_add(ctx0, cur, inpL);
+            if (ggml_tensor * layer_dir = lctx.cvec.tensor_for(il)) {
+                cur = ggml_add(ctx0, cur, layer_dir);
+            }
             cb(cur, "l_out", il);

             // input for next layer
@@ -8309,8 +8317,7 @@ struct llm_build_context {
             cur = ggml_add(ctx0, cur, ffn_inp);
             cb(cur, "ffn_out", il);

-            ggml_tensor * layer_dir = lctx.cvec.tensor_for(il);
-            if (layer_dir != nullptr) {
+            if (ggml_tensor * layer_dir = lctx.cvec.tensor_for(il)) {
                 cur = ggml_add(ctx0, cur, layer_dir);
             }
             cb(cur, "l_out", il);
@@ -8443,8 +8450,7 @@ struct llm_build_context {
             cur = ggml_add(ctx0, cur, ffn_inp);
             cb(cur, "ffn_out", il);

-            ggml_tensor * layer_dir = lctx.cvec.tensor_for(il);
-            if (layer_dir != nullptr) {
+            if (ggml_tensor * layer_dir = lctx.cvec.tensor_for(il)) {
                 cur = ggml_add(ctx0, cur, layer_dir);
             }
             cb(cur, "l_out", il);
@@ -8552,8 +8558,14 @@ struct llm_build_context {
                 cb(cur, "ffn_out", il);
             }

-            inpL = ggml_add(ctx0, cur, ffn_inp);
-            cb(inpL, "l_out", il);
+            cur = ggml_add(ctx0, cur, ffn_inp);
+            if (ggml_tensor * layer_dir = lctx.cvec.tensor_for(il)) {
+                cur = ggml_add(ctx0, cur, layer_dir);
+            }
+            cb(cur, "l_out", il);
+
+            // input for next layer
+            inpL = cur;
         }

         cur = llm_build_norm(ctx0, inpL, hparams,
@@ -8641,6 +8653,9 @@ struct llm_build_context {
             }

             cur = ggml_add(ctx0, cur, ffn_inp);
+            if (ggml_tensor * layer_dir = lctx.cvec.tensor_for(il)) {
+                cur = ggml_add(ctx0, cur, layer_dir);
+            }
             cb(cur, "l_out", il);

             // input for next layer
@@ -8936,8 +8951,14 @@ struct llm_build_context {
                 cb(cur, "ffn_out", il);
             }

-            inpL = ggml_add(ctx0, cur, ffn_inp);
-            cb(inpL, "l_out", il);
+            cur = ggml_add(ctx0, cur, ffn_inp);
+            if (ggml_tensor * layer_dir = lctx.cvec.tensor_for(il)) {
+                cur = ggml_add(ctx0, cur, layer_dir);
+            }
+            cb(cur, "l_out", il);
+
+            // input for next layer
+            inpL = cur;
         }

         cur = llm_build_norm(ctx0, inpL, hparams,
@@ -9071,6 +9092,9 @@ struct llm_build_context {
             }

             cur = ggml_add(ctx0, cur, ffn_inp);
+            if (ggml_tensor * layer_dir = lctx.cvec.tensor_for(il)) {
+                cur = ggml_add(ctx0, cur, layer_dir);
+            }
             cb(cur, "l_out", il);

             // input for next layer
@@ -9219,6 +9243,9 @@ struct llm_build_context {
             }

             cur = ggml_add(ctx0, cur, ffn_inp);
+            if (ggml_tensor * layer_dir = lctx.cvec.tensor_for(il)) {
+                cur = ggml_add(ctx0, cur, layer_dir);
+            }
             cb(cur, "l_out", il);

             // input for next layer
@@ -9331,6 +9358,9 @@ struct llm_build_context {
             }

             cur = ggml_add(ctx0, cur, ffn_inp);
+            if (ggml_tensor * layer_dir = lctx.cvec.tensor_for(il)) {
+                cur = ggml_add(ctx0, cur, layer_dir);
+            }
             cb(cur, "l_out", il);

             // input for next layer
@@ -9442,13 +9472,9 @@ struct llm_build_context {
             cb(cur, "ffn_out", il);

             cur = ggml_add(ctx0, cur, ffn_inp);
-
-            // add control vector
-            ggml_tensor * layer_dir = lctx.cvec.tensor_for(il);
-            if (layer_dir != nullptr) {
+            if (ggml_tensor * layer_dir = lctx.cvec.tensor_for(il)) {
                 cur = ggml_add(ctx0, cur, layer_dir);
             }
-
             cb(cur, "l_out", il);

             // input for next layer
@@ -9594,6 +9620,9 @@ struct llm_build_context {
             }

             cur = ggml_add(ctx0, cur, ffn_inp);
+            if (ggml_tensor * layer_dir = lctx.cvec.tensor_for(il)) {
+                cur = ggml_add(ctx0, cur, layer_dir);
+            }
             cb(cur, "l_out", il);

             // input for next layer
@@ -9717,8 +9746,12 @@ struct llm_build_context {
             cb(cur, "l_out", il);

             cur = ggml_add(ctx0, cur, inpL);
+            if (ggml_tensor * layer_dir = lctx.cvec.tensor_for(il)) {
+                cur = ggml_add(ctx0, cur, layer_dir);
+            }
             cb(cur, "l_out", il);

+            // input for next layer
             inpL = cur;
         }

@@ -9850,8 +9883,12 @@ struct llm_build_context {
             }

             cur = ggml_add(ctx0, residual, cur);
+            if (ggml_tensor * layer_dir = lctx.cvec.tensor_for(il)) {
+                cur = ggml_add(ctx0, cur, layer_dir);
+            }
             cb(cur, "l_out", il);

+            // input for next layer
             inpL = cur;
         }

@@ -9953,6 +9990,9 @@ struct llm_build_context {
             cb(cur, "l_out", il);

             cur = ggml_add(ctx0, cur, inpL);
+            if (ggml_tensor * layer_dir = lctx.cvec.tensor_for(il)) {
+                cur = ggml_add(ctx0, cur, layer_dir);
+            }
             cb(cur, "l_out", il);

             // input for next layer
@@ -10058,8 +10098,14 @@ struct llm_build_context {
                 cb(cur, "ffn_out", il);
             }

-            inpL = ggml_add(ctx0, cur, ffn_inp);
-            cb(inpL, "l_out", il);
+            cur = ggml_add(ctx0, cur, ffn_inp);
+            if (ggml_tensor * layer_dir = lctx.cvec.tensor_for(il)) {
+                cur = ggml_add(ctx0, cur, layer_dir);
+            }
+            cb(cur, "l_out", il);
+
+            // input for next layer
+            inpL = cur;
         }

         cur = llm_build_norm(ctx0, inpL, hparams,
@@ -10165,8 +10211,14 @@ struct llm_build_context {
                 cb(cur, "ffn_out", il);
             }

-            inpL = ggml_add(ctx0, cur, ffn_inp);
-            cb(inpL, "l_out", il);
+            cur = ggml_add(ctx0, cur, ffn_inp);
+            if (ggml_tensor * layer_dir = lctx.cvec.tensor_for(il)) {
+                cur = ggml_add(ctx0, cur, layer_dir);
+            }
+            cb(cur, "l_out", il);
+
+            // input for next layer
+            inpL = cur;
         }

         cur = llm_build_norm(ctx0, inpL, hparams,
@@ -10278,6 +10330,9 @@ struct llm_build_context {
             cb(cur, "ffn_out", il);

             cur = ggml_add(ctx0, cur, ffn_inp);
+            if (ggml_tensor * layer_dir = lctx.cvec.tensor_for(il)) {
+                cur = ggml_add(ctx0, cur, layer_dir);
+            }
             cb(cur, "l_out", il);

             // input for next layer
@@ -10395,6 +10450,9 @@ struct llm_build_context {
             cb(cur, "ffn_out", il);

             cur = ggml_add(ctx0, cur, ffn_inp);
+            if (ggml_tensor * layer_dir = lctx.cvec.tensor_for(il)) {
+                cur = ggml_add(ctx0, cur, layer_dir);
+            }
             cb(cur, "l_out", il);

             // input for next layer
@@ -10536,6 +10594,9 @@ struct llm_build_context {
             cb(cur, "hidden_scaled_ffn", -1);

             cur = ggml_add(ctx0, cur, ffn_inp);
+            if (ggml_tensor * layer_dir = lctx.cvec.tensor_for(il)) {
+                cur = ggml_add(ctx0, cur, layer_dir);
+            }
             cb(cur, "l_out", il);

             // input for next layer
@@ -10648,6 +10709,9 @@ struct llm_build_context {
             }

             cur = ggml_add(ctx0, cur, sa_out);
+            if (ggml_tensor * layer_dir = lctx.cvec.tensor_for(il)) {
+                cur = ggml_add(ctx0, cur, layer_dir);
+            }
             cb(cur, "l_out", il);

             // input for next layer
@@ -10765,6 +10829,9 @@ struct llm_build_context {
                     LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
             cb(cur, "ffn_out", il);
             cur = ggml_add(ctx0, cur, ffn_inp);
+            if (ggml_tensor * layer_dir = lctx.cvec.tensor_for(il)) {
+                cur = ggml_add(ctx0, cur, layer_dir);
+            }
             cb(cur, "l_out", il);

             // input for next layer
@@ -10913,6 +10980,9 @@ struct llm_build_context {

             // residual
             cur = ggml_add(ctx0, cur, inpL);
+            if (ggml_tensor * layer_dir = lctx.cvec.tensor_for(il)) {
+                cur = ggml_add(ctx0, cur, layer_dir);
+            }
             cb(cur, "l_out", il);

             // input for next layer
@@ -11054,13 +11124,9 @@ struct llm_build_context {
             // add together residual + FFN + self-attention
             cur = ggml_add(ctx0, cur, inpL);
             cur = ggml_add(ctx0, cur, attn_out);
-
-            // add control vector
-            ggml_tensor * layer_dir = lctx.cvec.tensor_for(il);
-            if (layer_dir != nullptr) {
+            if (ggml_tensor * layer_dir = lctx.cvec.tensor_for(il)) {
                 cur = ggml_add(ctx0, cur, layer_dir);
             }
-
             cb(cur, "l_out", il);

             // input for next layer
@@ -11196,8 +11262,7 @@ struct llm_build_context {
             cur = ggml_add(ctx0, cur, ffn_inp);
             cb(cur, "ffn_out", il);

-            ggml_tensor * layer_dir = lctx.cvec.tensor_for(il);
-            if (layer_dir != nullptr) {
+            if (ggml_tensor * layer_dir = lctx.cvec.tensor_for(il)) {
                 cur = ggml_add(ctx0, cur, layer_dir);
             }
             cb(cur, "l_out", il);
@@ -11313,8 +11378,14 @@ struct llm_build_context {
                 cur = ggml_add(ctx0, cur, inpL);
                 cb(cur, "ffn_out", il);

-                inpL = ggml_add(ctx0, cur, attn_out);
-                cb(inpL, "l_out", il);
+                cur = ggml_add(ctx0, cur, attn_out);
+                if (ggml_tensor * layer_dir = lctx.cvec.tensor_for(il)) {
+                    cur = ggml_add(ctx0, cur, layer_dir);
+                }
+                cb(cur, "l_out", il);
+
+                // input for next layer
+                inpL = cur;
             } else {
                 // attention and ffn are computed sequentially
                 // x = x + attn(ln1(x))
@@ -11337,8 +11408,14 @@ struct llm_build_context {
                         LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
                 cb(cur, "ffn_out", il);

-                inpL = ggml_add(ctx0, cur, ffn_inp);
-                cb(inpL, "l_out", il);
+                cur = ggml_add(ctx0, cur, ffn_inp);
+                if (ggml_tensor * layer_dir = lctx.cvec.tensor_for(il)) {
+                    cur = ggml_add(ctx0, cur, layer_dir);
+                }
+                cb(cur, "l_out", il);
+
+                // input for next layer
+                inpL = cur;
             }
         }

@@ -11465,8 +11542,7 @@ struct llm_build_context {
             cur = ggml_add(ctx0, cur, ffn_out);
             cb(cur, "ffn_out", il);

-            ggml_tensor * layer_dir = lctx.cvec.tensor_for(il);
-            if (layer_dir != nullptr) {
+            if (ggml_tensor * layer_dir = lctx.cvec.tensor_for(il)) {
                 cur = ggml_add(ctx0, cur, layer_dir);
             }
             cb(cur, "l_out", il);
@@ -11701,6 +11777,9 @@ struct llm_build_context {
             }

             cur = ggml_add(ctx0, cur, ffn_inp);
+            if (ggml_tensor * layer_dir = lctx.cvec.tensor_for(il)) {
+                cur = ggml_add(ctx0, cur, layer_dir);
+            }
             cb(cur, "l_out", il);

             // input for next layer

From f393795a79452dcf319ab63a4207887da2ef464c Mon Sep 17 00:00:00 2001
From: jukofyork <69222624+jukofyork@users.noreply.github.com>
Date: Sat, 22 Jun 2024 18:02:11 +0100
Subject: [PATCH 4/5] Removed double calls to `cb(cur, "l_out", il)`

---
 llama.cpp | 7 +------
 1 file changed, 1 insertion(+), 6 deletions(-)

diff --git a/llama.cpp b/llama.cpp
index 5edce026a7a00..dba720e405a73 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -8160,8 +8160,6 @@ struct llm_build_context {
             }

             cur = ggml_add(ctx0, cur, ffn_inp);
-            cb(cur, "l_out", il);
-
             cur = ggml_add(ctx0, cur, inpL);
             if (ggml_tensor * layer_dir = lctx.cvec.tensor_for(il)) {
                 cur = ggml_add(ctx0, cur, layer_dir);
@@ -9743,8 +9741,6 @@ struct llm_build_context {
             }

             cur = ggml_add(ctx0, cur, ffn_output);
-            cb(cur, "l_out", il);
-
             cur = ggml_add(ctx0, cur, inpL);
             if (ggml_tensor * layer_dir = lctx.cvec.tensor_for(il)) {
                 cur = ggml_add(ctx0, cur, layer_dir);
@@ -9987,8 +9983,6 @@ struct llm_build_context {
             }

             cur = ggml_add(ctx0, cur, sa_out);
-            cb(cur, "l_out", il);
-
             cur = ggml_add(ctx0, cur, inpL);
             if (ggml_tensor * layer_dir = lctx.cvec.tensor_for(il)) {
                 cur = ggml_add(ctx0, cur, layer_dir);
@@ -10828,6 +10822,7 @@ struct llm_build_context {
                     NULL,
                     LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
             cb(cur, "ffn_out", il);
+
             cur = ggml_add(ctx0, cur, ffn_inp);
             if (ggml_tensor * layer_dir = lctx.cvec.tensor_for(il)) {
                 cur = ggml_add(ctx0, cur, layer_dir);

From a5a53194ffcce27594152715a535d731237e73a2 Mon Sep 17 00:00:00 2001
From: jukofyork <69222624+jukofyork@users.noreply.github.com>
Date: Tue, 25 Jun 2024 17:22:05 +0100
Subject: [PATCH 5/5] Moved control vector logic to llama_control_vector::apply_to()

---
 llama.cpp | 138 +++++++++++++++++------------------------------------
 1 file changed, 42 insertions(+), 96 deletions(-)

diff --git a/llama.cpp b/llama.cpp
index a81001b37cd90..c5a0293fbadf3 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -2295,13 +2295,21 @@ struct llama_control_vector {
     int32_t layer_start = -1;
     int32_t layer_end   = -1;

-    ggml_tensor * tensor_for(int il) const {
+    struct ggml_tensor * tensor_for(int il) const {
         if (il < 0 || il < layer_start || il > layer_end || (size_t) il >= tensors.size()) {
             return nullptr;
         }
         return tensors[il];
     }

+    struct ggml_tensor * apply_to(struct ggml_context * ctx, struct ggml_tensor * cur, int il) const {
+        ggml_tensor * layer_dir = tensor_for(il);
+        if (layer_dir != nullptr) {
+            cur = ggml_add(ctx, cur, layer_dir);
+        }
+        return cur;
+    }
+
     ~llama_control_vector() {
         for (struct ggml_context * ctx : ctxs) {
             ggml_free(ctx);
@@ -7901,9 +7909,7 @@ struct llm_build_context {
             cur = ggml_add(ctx0, cur, ffn_inp);
             cb(cur, "ffn_out", il);

-            if (ggml_tensor * layer_dir = lctx.cvec.tensor_for(il)) {
-                cur = ggml_add(ctx0, cur, layer_dir);
-            }
+            cur = lctx.cvec.apply_to(ctx0, cur, il);
             cb(cur, "l_out", il);

             // input for next layer
@@ -8018,9 +8024,7 @@ struct llm_build_context {
             }

             cur = ggml_add(ctx0, cur, ffn_inp);
-            if (ggml_tensor * layer_dir = lctx.cvec.tensor_for(il)) {
-                cur = ggml_add(ctx0, cur, layer_dir);
-            }
+            cur = lctx.cvec.apply_to(ctx0, cur, il);
             cb(cur, "l_out", il);

             // input for next layer
@@ -8125,9 +8129,7 @@ struct llm_build_context {
             }

             cur = ggml_add(ctx0, cur, ffn_inp);
-            if (ggml_tensor * layer_dir = lctx.cvec.tensor_for(il)) {
-                cur = ggml_add(ctx0, cur, layer_dir);
-            }
+            cur = lctx.cvec.apply_to(ctx0, cur, il);
             cb(cur, "l_out", il);

             // input for next layer
@@ -8244,9 +8246,7 @@ struct llm_build_context {

             cur = ggml_add(ctx0, cur, ffn_inp);
             cur = ggml_add(ctx0, cur, inpL);
-            if (ggml_tensor * layer_dir = lctx.cvec.tensor_for(il)) {
-                cur = ggml_add(ctx0, cur, layer_dir);
-            }
+            cur = lctx.cvec.apply_to(ctx0, cur, il);
             cb(cur, "l_out", il);

             // input for next layer
@@ -8398,9 +8398,7 @@ struct llm_build_context {
             cur = ggml_add(ctx0, cur, ffn_inp);
             cb(cur, "ffn_out", il);

-            if (ggml_tensor * layer_dir = lctx.cvec.tensor_for(il)) {
-                cur = ggml_add(ctx0, cur, layer_dir);
-            }
+            cur = lctx.cvec.apply_to(ctx0, cur, il);
             cb(cur, "l_out", il);

             // input for next layer
@@ -8531,9 +8529,7 @@ struct llm_build_context {
             cur = ggml_add(ctx0, cur, ffn_inp);
             cb(cur, "ffn_out", il);

-            if (ggml_tensor * layer_dir = lctx.cvec.tensor_for(il)) {
-                cur = ggml_add(ctx0, cur, layer_dir);
-            }
+            cur = lctx.cvec.apply_to(ctx0, cur, il);
             cb(cur, "l_out", il);

             // input for next layer
@@ -8640,9 +8636,7 @@ struct llm_build_context {
             }

             cur = ggml_add(ctx0, cur, ffn_inp);
-            if (ggml_tensor * layer_dir = lctx.cvec.tensor_for(il)) {
-                cur = ggml_add(ctx0, cur, layer_dir);
-            }
+            cur = lctx.cvec.apply_to(ctx0, cur, il);
             cb(cur, "l_out", il);

             // input for next layer
@@ -8734,9 +8728,7 @@ struct llm_build_context {
             }

             cur = ggml_add(ctx0, cur, ffn_inp);
-            if (ggml_tensor * layer_dir = lctx.cvec.tensor_for(il)) {
-                cur = ggml_add(ctx0, cur, layer_dir);
-            }
+            cur = lctx.cvec.apply_to(ctx0, cur, il);
             cb(cur, "l_out", il);

             // input for next layer
@@ -9033,9 +9025,7 @@ struct llm_build_context {
             }

             cur = ggml_add(ctx0, cur, ffn_inp);
-            if (ggml_tensor * layer_dir = lctx.cvec.tensor_for(il)) {
-                cur = ggml_add(ctx0, cur, layer_dir);
-            }
+            cur = lctx.cvec.apply_to(ctx0, cur, il);
             cb(cur, "l_out", il);

             // input for next layer
@@ -9173,9 +9163,7 @@ struct llm_build_context {
             }

             cur = ggml_add(ctx0, cur, ffn_inp);
-            if (ggml_tensor * layer_dir = lctx.cvec.tensor_for(il)) {
-                cur = ggml_add(ctx0, cur, layer_dir);
-            }
+            cur = lctx.cvec.apply_to(ctx0, cur, il);
             cb(cur, "l_out", il);

             // input for next layer
@@ -9324,9 +9312,7 @@ struct llm_build_context {
             }

             cur = ggml_add(ctx0, cur, ffn_inp);
-            if (ggml_tensor * layer_dir = lctx.cvec.tensor_for(il)) {
-                cur = ggml_add(ctx0, cur, layer_dir);
-            }
+            cur = lctx.cvec.apply_to(ctx0, cur, il);
             cb(cur, "l_out", il);

             // input for next layer
@@ -9439,9 +9425,7 @@ struct llm_build_context {
             }

             cur = ggml_add(ctx0, cur, ffn_inp);
-            if (ggml_tensor * layer_dir = lctx.cvec.tensor_for(il)) {
-                cur = ggml_add(ctx0, cur, layer_dir);
-            }
+            cur = lctx.cvec.apply_to(ctx0, cur, il);
             cb(cur, "l_out", il);

             // input for next layer
@@ -9553,9 +9537,7 @@ struct llm_build_context {
             cb(cur, "ffn_out", il);

             cur = ggml_add(ctx0, cur, ffn_inp);
-            if (ggml_tensor * layer_dir = lctx.cvec.tensor_for(il)) {
-                cur = ggml_add(ctx0, cur, layer_dir);
-            }
+            cur = lctx.cvec.apply_to(ctx0, cur, il);
             cb(cur, "l_out", il);

             // input for next layer
@@ -9701,9 +9683,7 @@ struct llm_build_context {
             }

             cur = ggml_add(ctx0, cur, ffn_inp);
-            if (ggml_tensor * layer_dir = lctx.cvec.tensor_for(il)) {
-                cur = ggml_add(ctx0, cur, layer_dir);
-            }
+            cur = lctx.cvec.apply_to(ctx0, cur, il);
             cb(cur, "l_out", il);

             // input for next layer
@@ -9825,9 +9805,7 @@ struct llm_build_context {

             cur = ggml_add(ctx0, cur, ffn_output);
             cur = ggml_add(ctx0, cur, inpL);
-            if (ggml_tensor * layer_dir = lctx.cvec.tensor_for(il)) {
-                cur = ggml_add(ctx0, cur, layer_dir);
-            }
+            cur = lctx.cvec.apply_to(ctx0, cur, il);
             cb(cur, "l_out", il);

             // input for next layer
@@ -9962,9 +9940,7 @@ struct llm_build_context {
             }

             cur = ggml_add(ctx0, residual, cur);
-            if (ggml_tensor * layer_dir = lctx.cvec.tensor_for(il)) {
-                cur = ggml_add(ctx0, cur, layer_dir);
-            }
+            cur = lctx.cvec.apply_to(ctx0, cur, il);
             cb(cur, "l_out", il);

             // input for next layer
@@ -10067,9 +10043,7 @@ struct llm_build_context {

             cur = ggml_add(ctx0, cur, sa_out);
             cur = ggml_add(ctx0, cur, inpL);
-            if (ggml_tensor * layer_dir = lctx.cvec.tensor_for(il)) {
-                cur = ggml_add(ctx0, cur, layer_dir);
-            }
+            cur = lctx.cvec.apply_to(ctx0, cur, il);
             cb(cur, "l_out", il);

             // input for next layer
@@ -10176,9 +10150,7 @@ struct llm_build_context {
             }

             cur = ggml_add(ctx0, cur, ffn_inp);
-            if (ggml_tensor * layer_dir = lctx.cvec.tensor_for(il)) {
-                cur = ggml_add(ctx0, cur, layer_dir);
-            }
+            cur = lctx.cvec.apply_to(ctx0, cur, il);
             cb(cur, "l_out", il);

             // input for next layer
@@ -10289,9 +10261,7 @@ struct llm_build_context {
             }

             cur = ggml_add(ctx0, cur, ffn_inp);
-            if (ggml_tensor * layer_dir = lctx.cvec.tensor_for(il)) {
-                cur = ggml_add(ctx0, cur, layer_dir);
-            }
+            cur = lctx.cvec.apply_to(ctx0, cur, il);
             cb(cur, "l_out", il);

             // input for next layer
@@ -10407,9 +10377,7 @@ struct llm_build_context {
             cb(cur, "ffn_out", il);

             cur = ggml_add(ctx0, cur, ffn_inp);
-            if (ggml_tensor * layer_dir = lctx.cvec.tensor_for(il)) {
-                cur = ggml_add(ctx0, cur, layer_dir);
-            }
+            cur = lctx.cvec.apply_to(ctx0, cur, il);
             cb(cur, "l_out", il);

             // input for next layer
@@ -10527,9 +10495,7 @@ struct llm_build_context {
             cb(cur, "ffn_out", il);

             cur = ggml_add(ctx0, cur, ffn_inp);
-            if (ggml_tensor * layer_dir = lctx.cvec.tensor_for(il)) {
-                cur = ggml_add(ctx0, cur, layer_dir);
-            }
+            cur = lctx.cvec.apply_to(ctx0, cur, il);
             cb(cur, "l_out", il);

             // input for next layer
@@ -10671,9 +10637,7 @@ struct llm_build_context {
             cb(cur, "hidden_scaled_ffn", -1);

             cur = ggml_add(ctx0, cur, ffn_inp);
-            if (ggml_tensor * layer_dir = lctx.cvec.tensor_for(il)) {
-                cur = ggml_add(ctx0, cur, layer_dir);
-            }
+            cur = lctx.cvec.apply_to(ctx0, cur, il);
             cb(cur, "l_out", il);

             // input for next layer
@@ -10786,9 +10750,7 @@ struct llm_build_context {
             }

             cur = ggml_add(ctx0, cur, sa_out);
-            if (ggml_tensor * layer_dir = lctx.cvec.tensor_for(il)) {
-                cur = ggml_add(ctx0, cur, layer_dir);
-            }
+            cur = lctx.cvec.apply_to(ctx0, cur, il);
             cb(cur, "l_out", il);

             // input for next layer
@@ -10907,9 +10869,7 @@ struct llm_build_context {
             cb(cur, "ffn_out", il);

             cur = ggml_add(ctx0, cur, ffn_inp);
-            if (ggml_tensor * layer_dir = lctx.cvec.tensor_for(il)) {
-                cur = ggml_add(ctx0, cur, layer_dir);
-            }
+            cur = lctx.cvec.apply_to(ctx0, cur, il);
             cb(cur, "l_out", il);

             // input for next layer
@@ -11058,9 +11018,7 @@ struct llm_build_context {

             // residual
             cur = ggml_add(ctx0, cur, inpL);
-            if (ggml_tensor * layer_dir = lctx.cvec.tensor_for(il)) {
-                cur = ggml_add(ctx0, cur, layer_dir);
-            }
+            cur = lctx.cvec.apply_to(ctx0, cur, il);
             cb(cur, "l_out", il);

             // input for next layer
@@ -11202,9 +11160,7 @@ struct llm_build_context {
             // add together residual + FFN + self-attention
             cur = ggml_add(ctx0, cur, inpL);
             cur = ggml_add(ctx0, cur, attn_out);
-            if (ggml_tensor * layer_dir = lctx.cvec.tensor_for(il)) {
-                cur = ggml_add(ctx0, cur, layer_dir);
-            }
+            cur = lctx.cvec.apply_to(ctx0, cur, il);
             cb(cur, "l_out", il);

             // input for next layer
@@ -11340,9 +11296,7 @@ struct llm_build_context {
             cur = ggml_add(ctx0, cur, ffn_inp);
             cb(cur, "ffn_out", il);

-            if (ggml_tensor * layer_dir = lctx.cvec.tensor_for(il)) {
-                cur = ggml_add(ctx0, cur, layer_dir);
-            }
+            cur = lctx.cvec.apply_to(ctx0, cur, il);
             cb(cur, "l_out", il);

             // input for next layer
@@ -11457,11 +11411,9 @@ struct llm_build_context {
                 cb(cur, "ffn_out", il);

                 cur = ggml_add(ctx0, cur, attn_out);
-                if (ggml_tensor * layer_dir = lctx.cvec.tensor_for(il)) {
-                    cur = ggml_add(ctx0, cur, layer_dir);
-                }
+                cur = lctx.cvec.apply_to(ctx0, cur, il);
                 cb(cur, "l_out", il);
-
+
                 // input for next layer
                 inpL = cur;
             } else {
@@ -11487,11 +11439,9 @@ struct llm_build_context {
                 cb(cur, "ffn_out", il);

                 cur = ggml_add(ctx0, cur, ffn_inp);
-                if (ggml_tensor * layer_dir = lctx.cvec.tensor_for(il)) {
-                    cur = ggml_add(ctx0, cur, layer_dir);
-                }
+                cur = lctx.cvec.apply_to(ctx0, cur, il);
                 cb(cur, "l_out", il);
-
+
                 // input for next layer
                 inpL = cur;
             }
@@ -11620,9 +11570,7 @@ struct llm_build_context {
             cur = ggml_add(ctx0, cur, ffn_out);
             cb(cur, "ffn_out", il);

-            if (ggml_tensor * layer_dir = lctx.cvec.tensor_for(il)) {
-                cur = ggml_add(ctx0, cur, layer_dir);
-            }
+            cur = lctx.cvec.apply_to(ctx0, cur, il);
             cb(cur, "l_out", il);

             // input for next layer
@@ -11855,9 +11803,7 @@ struct llm_build_context {
             }

             cur = ggml_add(ctx0, cur, ffn_inp);
-            if (ggml_tensor * layer_dir = lctx.cvec.tensor_for(il)) {
-                cur = ggml_add(ctx0, cur, layer_dir);
-            }
+            cur = lctx.cvec.apply_to(ctx0, cur, il);
             cb(cur, "l_out", il);

             // input for next layer
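
Note on the end state of this series: after PATCH 5/5 every build_*() graph
closes a layer the same way -- add the residual, call
lctx.cvec.apply_to(ctx0, cur, il), emit a single cb(cur, "l_out", il). The
helper turns a missing direction tensor (tensor_for() returning nullptr) into
a plain pass-through, so none of the thirty-odd builders needs its own
if-block. Below is a minimal, self-contained C++ sketch of that pattern; the
ggml_context/ggml_tensor types and ggml_add() here are simplified stand-ins,
not the real ggml API, so the snippet compiles and runs on its own:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    struct ggml_context {};                   // stand-in for the real ggml context
    struct ggml_tensor  { float v = 0.0f; };  // stand-in: one "activation" value

    // stand-in for ggml_add(): records the sum in a fresh node
    static ggml_tensor * ggml_add(ggml_context * /*ctx*/, ggml_tensor * a, ggml_tensor * b) {
        static std::vector<ggml_tensor> pool(1024);  // fixed pool, pointers stay stable
        static std::size_t next = 0;
        ggml_tensor * out = &pool[next++];
        out->v = a->v + b->v;
        return out;
    }

    struct llama_control_vector {
        std::vector<ggml_tensor *> tensors;  // one direction per layer, entries may be null
        int32_t layer_start = -1;
        int32_t layer_end   = -1;

        // nullptr when layer il has no loaded direction
        ggml_tensor * tensor_for(int il) const {
            if (il < 0 || il < layer_start || il > layer_end || (std::size_t) il >= tensors.size()) {
                return nullptr;
            }
            return tensors[il];
        }

        // the PATCH 5/5 helper: add the layer's direction, or pass cur through
        ggml_tensor * apply_to(ggml_context * ctx, ggml_tensor * cur, int il) const {
            ggml_tensor * layer_dir = tensor_for(il);
            if (layer_dir != nullptr) {
                cur = ggml_add(ctx, cur, layer_dir);
            }
            return cur;
        }
    };

    int main() {
        ggml_context ctx;
        llama_control_vector cvec;
        ggml_tensor dir1{0.5f};
        cvec.tensors     = { nullptr, &dir1, nullptr };  // a direction only for layer 1
        cvec.layer_start = 1;
        cvec.layer_end   = 1;

        ggml_tensor x{1.0f};
        ggml_tensor * cur = &x;
        for (int il = 0; il < 3; ++il) {
            // ... attention + FFN for layer il would run here ...
            cur = cvec.apply_to(&ctx, cur, il);  // the one-line epilogue every builder now shares
        }
        std::printf("final activation: %.2f\n", cur->v);  // prints 1.50: only layer 1 applied
        return 0;
    }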