fixes ggerganov#7999 (adds control vectors to all build_XXX() functions in `llama.cpp` [needs testing]) (ggerganov#8060)

* fixes ggerganov#7999

The `build_command_r` function was missing the control vector application.

* Fixes qwen2 too

* Fixed all models' control vectors

* Removed double calls to `cb(cur, "l_out", il)`

* Moved control vector logic to `llama_control_vector::apply_to()`
jukofyork authored Jun 25, 2024
1 parent 6fcbf68 commit 163d50a
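
For reference, the core of the change (both snippets appear in the diff below): the per-layer control-vector pattern that previously appeared inline in only a handful of build_XXX() functions,

    ggml_tensor * layer_dir = lctx.cvec.tensor_for(il);
    if (layer_dir != nullptr) {
        cur = ggml_add(ctx0, cur, layer_dir);
    }

is moved into the new llama_control_vector::apply_to() helper, and every build_XXX() layer loop now ends with:

    cur = lctx.cvec.apply_to(ctx0, cur, il);
    cb(cur, "l_out", il);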
1 changed file: llama.cpp (73 additions, 39 deletions)
@@ -2368,13 +2368,21 @@ struct llama_control_vector {
int32_t layer_start = -1;
int32_t layer_end = -1;

- ggml_tensor * tensor_for(int il) const {
+ struct ggml_tensor * tensor_for(int il) const {
if (il < 0 || il < layer_start || il > layer_end || (size_t) il >= tensors.size()) {
return nullptr;
}
return tensors[il];
}

+ struct ggml_tensor * apply_to(struct ggml_context * ctx, struct ggml_tensor * cur, int il) const {
+     ggml_tensor * layer_dir = tensor_for(il);
+     if (layer_dir != nullptr) {
+         cur = ggml_add(ctx, cur, layer_dir);
+     }
+     return cur;
+ }
+
~llama_control_vector() {
for (struct ggml_context * ctx : ctxs) {
ggml_free(ctx);
@@ -8023,10 +8031,7 @@ struct llm_build_context {
cur = ggml_add(ctx0, cur, ffn_inp);
cb(cur, "ffn_out", il);

- ggml_tensor * layer_dir = lctx.cvec.tensor_for(il);
- if (layer_dir != nullptr) {
-     cur = ggml_add(ctx0, cur, layer_dir);
- }
+ cur = lctx.cvec.apply_to(ctx0, cur, il);
cb(cur, "l_out", il);

// input for next layer
@@ -8141,6 +8146,7 @@ struct llm_build_context {
}

cur = ggml_add(ctx0, cur, ffn_inp);
+ cur = lctx.cvec.apply_to(ctx0, cur, il);
cb(cur, "l_out", il);

// input for next layer
@@ -8245,6 +8251,7 @@ struct llm_build_context {
}

cur = ggml_add(ctx0, cur, ffn_inp);
+ cur = lctx.cvec.apply_to(ctx0, cur, il);
cb(cur, "l_out", il);

// input for next layer
@@ -8360,9 +8367,8 @@ struct llm_build_context {
}

cur = ggml_add(ctx0, cur, ffn_inp);
cb(cur, "l_out", il);

cur = ggml_add(ctx0, cur, inpL);
cur = lctx.cvec.apply_to(ctx0, cur, il);
cb(cur, "l_out", il);

// input for next layer
@@ -8514,10 +8520,7 @@ struct llm_build_context {
cur = ggml_add(ctx0, cur, ffn_inp);
cb(cur, "ffn_out", il);

- ggml_tensor * layer_dir = lctx.cvec.tensor_for(il);
- if (layer_dir != nullptr) {
-     cur = ggml_add(ctx0, cur, layer_dir);
- }
+ cur = lctx.cvec.apply_to(ctx0, cur, il);
cb(cur, "l_out", il);

// input for next layer
@@ -8648,10 +8651,7 @@ struct llm_build_context {
cur = ggml_add(ctx0, cur, ffn_inp);
cb(cur, "ffn_out", il);

- ggml_tensor * layer_dir = lctx.cvec.tensor_for(il);
- if (layer_dir != nullptr) {
-     cur = ggml_add(ctx0, cur, layer_dir);
- }
+ cur = lctx.cvec.apply_to(ctx0, cur, il);
cb(cur, "l_out", il);

// input for next layer
@@ -8757,8 +8757,12 @@ struct llm_build_context {
cb(cur, "ffn_out", il);
}

- inpL = ggml_add(ctx0, cur, ffn_inp);
- cb(inpL, "l_out", il);
+ cur = ggml_add(ctx0, cur, ffn_inp);
+ cur = lctx.cvec.apply_to(ctx0, cur, il);
+ cb(cur, "l_out", il);
+
+ // input for next layer
+ inpL = cur;
}

cur = llm_build_norm(ctx0, inpL, hparams,
@@ -8846,6 +8850,7 @@ struct llm_build_context {
}

cur = ggml_add(ctx0, cur, ffn_inp);
+ cur = lctx.cvec.apply_to(ctx0, cur, il);
cb(cur, "l_out", il);

// input for next layer
@@ -9141,8 +9146,12 @@ struct llm_build_context {
cb(cur, "ffn_out", il);
}

- inpL = ggml_add(ctx0, cur, ffn_inp);
- cb(inpL, "l_out", il);
+ cur = ggml_add(ctx0, cur, ffn_inp);
+ cur = lctx.cvec.apply_to(ctx0, cur, il);
+ cb(cur, "l_out", il);
+
+ // input for next layer
+ inpL = cur;
}

cur = llm_build_norm(ctx0, inpL, hparams,
@@ -9276,6 +9285,7 @@ struct llm_build_context {
}

cur = ggml_add(ctx0, cur, ffn_inp);
+ cur = lctx.cvec.apply_to(ctx0, cur, il);
cb(cur, "l_out", il);

// input for next layer
@@ -9424,6 +9434,7 @@ struct llm_build_context {
}

cur = ggml_add(ctx0, cur, ffn_inp);
+ cur = lctx.cvec.apply_to(ctx0, cur, il);
cb(cur, "l_out", il);

// input for next layer
@@ -9536,6 +9547,7 @@ struct llm_build_context {
}

cur = ggml_add(ctx0, cur, ffn_inp);
+ cur = lctx.cvec.apply_to(ctx0, cur, il);
cb(cur, "l_out", il);

// input for next layer
@@ -9647,6 +9659,7 @@ struct llm_build_context {
cb(cur, "ffn_out", il);

cur = ggml_add(ctx0, cur, ffn_inp);
+ cur = lctx.cvec.apply_to(ctx0, cur, il);
cb(cur, "l_out", il);

// input for next layer
@@ -9792,6 +9805,7 @@ struct llm_build_context {
}

cur = ggml_add(ctx0, cur, ffn_inp);
+ cur = lctx.cvec.apply_to(ctx0, cur, il);
cb(cur, "l_out", il);

// input for next layer
@@ -9912,11 +9926,11 @@ struct llm_build_context {
}

cur = ggml_add(ctx0, cur, ffn_output);
cb(cur, "l_out", il);

cur = ggml_add(ctx0, cur, inpL);
cur = lctx.cvec.apply_to(ctx0, cur, il);
cb(cur, "l_out", il);

// input for next layer
inpL = cur;
}

@@ -10048,8 +10062,10 @@ struct llm_build_context {
}

cur = ggml_add(ctx0, residual, cur);
+ cur = lctx.cvec.apply_to(ctx0, cur, il);
cb(cur, "l_out", il);

+ // input for next layer
inpL = cur;
}

@@ -10148,9 +10164,8 @@ struct llm_build_context {
}

cur = ggml_add(ctx0, cur, sa_out);
cb(cur, "l_out", il);

cur = ggml_add(ctx0, cur, inpL);
cur = lctx.cvec.apply_to(ctx0, cur, il);
cb(cur, "l_out", il);

// input for next layer
@@ -10256,8 +10271,12 @@ struct llm_build_context {
cb(cur, "ffn_out", il);
}

- inpL = ggml_add(ctx0, cur, ffn_inp);
- cb(inpL, "l_out", il);
+ cur = ggml_add(ctx0, cur, ffn_inp);
+ cur = lctx.cvec.apply_to(ctx0, cur, il);
+ cb(cur, "l_out", il);
+
+ // input for next layer
+ inpL = cur;
}

cur = llm_build_norm(ctx0, inpL, hparams,
@@ -10363,8 +10382,12 @@ struct llm_build_context {
cb(cur, "ffn_out", il);
}

- inpL = ggml_add(ctx0, cur, ffn_inp);
- cb(inpL, "l_out", il);
+ cur = ggml_add(ctx0, cur, ffn_inp);
+ cur = lctx.cvec.apply_to(ctx0, cur, il);
+ cb(cur, "l_out", il);
+
+ // input for next layer
+ inpL = cur;
}

cur = llm_build_norm(ctx0, inpL, hparams,
@@ -10476,6 +10499,7 @@ struct llm_build_context {
cb(cur, "ffn_out", il);

cur = ggml_add(ctx0, cur, ffn_inp);
+ cur = lctx.cvec.apply_to(ctx0, cur, il);
cb(cur, "l_out", il);

// input for next layer
@@ -10593,6 +10617,7 @@ struct llm_build_context {
cb(cur, "ffn_out", il);

cur = ggml_add(ctx0, cur, ffn_inp);
+ cur = lctx.cvec.apply_to(ctx0, cur, il);
cb(cur, "l_out", il);

// input for next layer
@@ -10734,6 +10759,7 @@ struct llm_build_context {
cb(cur, "hidden_scaled_ffn", -1);

cur = ggml_add(ctx0, cur, ffn_inp);
+ cur = lctx.cvec.apply_to(ctx0, cur, il);
cb(cur, "l_out", il);

// input for next layer
@@ -10846,6 +10872,7 @@ struct llm_build_context {
}

cur = ggml_add(ctx0, cur, sa_out);
+ cur = lctx.cvec.apply_to(ctx0, cur, il);
cb(cur, "l_out", il);

// input for next layer
@@ -10962,7 +10989,9 @@ struct llm_build_context {
NULL,
LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
cb(cur, "ffn_out", il);

cur = ggml_add(ctx0, cur, ffn_inp);
cur = lctx.cvec.apply_to(ctx0, cur, il);
cb(cur, "l_out", il);

// input for next layer
@@ -11111,6 +11140,7 @@ struct llm_build_context {

// residual
cur = ggml_add(ctx0, cur, inpL);
+ cur = lctx.cvec.apply_to(ctx0, cur, il);
cb(cur, "l_out", il);

// input for next layer
@@ -11252,6 +11282,7 @@ struct llm_build_context {
// add together residual + FFN + self-attention
cur = ggml_add(ctx0, cur, inpL);
cur = ggml_add(ctx0, cur, attn_out);
+ cur = lctx.cvec.apply_to(ctx0, cur, il);
cb(cur, "l_out", il);

// input for next layer
@@ -11387,10 +11418,7 @@ struct llm_build_context {
cur = ggml_add(ctx0, cur, ffn_inp);
cb(cur, "ffn_out", il);

- ggml_tensor * layer_dir = lctx.cvec.tensor_for(il);
- if (layer_dir != nullptr) {
-     cur = ggml_add(ctx0, cur, layer_dir);
- }
+ cur = lctx.cvec.apply_to(ctx0, cur, il);
cb(cur, "l_out", il);

// input for next layer
@@ -11504,8 +11532,12 @@ struct llm_build_context {
cur = ggml_add(ctx0, cur, inpL);
cb(cur, "ffn_out", il);

- inpL = ggml_add(ctx0, cur, attn_out);
- cb(inpL, "l_out", il);
+ cur = ggml_add(ctx0, cur, attn_out);
+ cur = lctx.cvec.apply_to(ctx0, cur, il);
+ cb(cur, "l_out", il);
+
+ // input for next layer
+ inpL = cur;
} else {
// attention and ffn are computed sequentially
// x = x + attn(ln1(x))
@@ -11528,8 +11560,12 @@ struct llm_build_context {
LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
cb(cur, "ffn_out", il);

- inpL = ggml_add(ctx0, cur, ffn_inp);
- cb(inpL, "l_out", il);
+ cur = ggml_add(ctx0, cur, ffn_inp);
+ cur = lctx.cvec.apply_to(ctx0, cur, il);
+ cb(cur, "l_out", il);
+
+ // input for next layer
+ inpL = cur;
}
}

@@ -11656,10 +11692,7 @@ struct llm_build_context {
cur = ggml_add(ctx0, cur, ffn_out);
cb(cur, "ffn_out", il);

- ggml_tensor * layer_dir = lctx.cvec.tensor_for(il);
- if (layer_dir != nullptr) {
-     cur = ggml_add(ctx0, cur, layer_dir);
- }
+ cur = lctx.cvec.apply_to(ctx0, cur, il);
cb(cur, "l_out", il);

// input for next layer
@@ -11892,6 +11925,7 @@ struct llm_build_context {
}

cur = ggml_add(ctx0, cur, ffn_inp);
+ cur = lctx.cvec.apply_to(ctx0, cur, il);
cb(cur, "l_out", il);

// input for next layer