sampling : improve mirostat implementation
ggml-ci
ggerganov committed Sep 5, 2024
1 parent 33bdb62 commit 8307e96
Showing 7 changed files with 95 additions and 88 deletions.
22 changes: 11 additions & 11 deletions common/sampling.cpp
@@ -121,7 +121,7 @@ struct gpt_sampler {
             cur[token_id] = llama_token_data{token_id, logits[token_id], 0.0f};
         }
 
-        cur_p = { cur.data(), cur.size(), LLAMA_TOKEN_NULL, false };
+        cur_p = { cur.data(), cur.size(), -1, false };
     }
 };

@@ -202,17 +202,17 @@ struct gpt_sampler * gpt_sampler_init(const struct llama_model * model, const st
                         GGML_ASSERT(false && "unknown sampler type");
                 }
             }
+            llama_sampler_chain_add(result->chain, llama_sampler_init_softmax());
+            llama_sampler_chain_add(result->chain, llama_sampler_init_dist(params.seed));
         } else if (params.mirostat == 1) {
             llama_sampler_chain_add(result->chain, llama_sampler_init_temp(params.temp));
-            llama_sampler_chain_add(result->chain, llama_sampler_init_mirostat(model, params.mirostat_tau, params.mirostat_eta));
+            llama_sampler_chain_add(result->chain, llama_sampler_init_mirostat(model, params.seed, params.mirostat_tau, params.mirostat_eta));
         } else if (params.mirostat == 2) {
             llama_sampler_chain_add(result->chain, llama_sampler_init_temp(params.temp));
-            llama_sampler_chain_add(result->chain, llama_sampler_init_mirostat_v2(params.mirostat_tau, params.mirostat_eta));
+            llama_sampler_chain_add(result->chain, llama_sampler_init_mirostat_v2(params.seed, params.mirostat_tau, params.mirostat_eta));
         } else {
             GGML_ASSERT(false && "unknown mirostat version");
         }
-        llama_sampler_chain_add(result->chain, llama_sampler_init_softmax());
-        llama_sampler_chain_add(result->chain, llama_sampler_init_dist(params.seed));
     } else {
         llama_sampler_chain_add(result->chain, llama_sampler_init_softmax());
         llama_sampler_chain_add(result->chain, llama_sampler_init_greedy());
@@ -246,8 +246,8 @@ struct gpt_sampler * gpt_sampler_clone(gpt_sampler * gsmpl) {
     };
 }
 
-void gpt_sampler_accept(struct gpt_sampler * gsmpl, llama_token token, bool apply_grammar) {
-    if (apply_grammar) {
+void gpt_sampler_accept(struct gpt_sampler * gsmpl, llama_token token, bool accept_grammar) {
+    if (accept_grammar) {
         llama_sampler_accept(gsmpl->grmr, token);
     }
 
@@ -293,9 +293,9 @@ llama_token gpt_sampler_sample(struct gpt_sampler * gsmpl, struct llama_context
 
     llama_sampler_apply(chain, &cur_p);
 
-    const llama_token id = cur_p.data[cur_p.selected].id;
+    GGML_ASSERT(cur_p.selected != -1 && "no selected token during sampling - check your sampling configuration");
 
-    GGML_ASSERT(id != LLAMA_TOKEN_NULL && "null token in the sampling history - check your sampling configuration");
+    const llama_token id = cur_p.data[cur_p.selected].id;
 
     if (grammar_first) {
         return id;
@@ -304,7 +304,7 @@ llama_token gpt_sampler_sample(struct gpt_sampler * gsmpl, struct llama_context
     // check if it the sampled token fits the grammar
     {
         llama_token_data single_token_data = { id, 1.0f, 0.0f };
-        llama_token_data_array single_token_data_array = { &single_token_data, 1, LLAMA_TOKEN_NULL, false };
+        llama_token_data_array single_token_data_array = { &single_token_data, 1, -1, false };
 
         llama_sampler_apply(grmr, &single_token_data_array);
 
@@ -324,7 +324,7 @@ llama_token gpt_sampler_sample(struct gpt_sampler * gsmpl, struct llama_context
 
     llama_sampler_apply(chain, &cur_p);
 
-    GGML_ASSERT(cur_p.data[cur_p.selected].id != LLAMA_TOKEN_NULL && "null token in the sampling history - check your sampling configuration");
+    GGML_ASSERT(cur_p.selected != -1 && "no selected token during sampling - check your sampling configuration");
 
     return cur_p.data[cur_p.selected].id;
 }
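As an aside, the single-token grammar check in the hunk above is self-contained enough to read in isolation: the sampled id is wrapped in a one-element candidate array, the grammar sampler is applied to it, and a logit of -INFINITY marks rejection. A minimal sketch of that idea; the function name and includes are ours, not part of the diff:

#include <cmath>   // INFINITY
#include "llama.h"

// Returns true if `id` survives the grammar sampler `grmr` (e.g. one created
// with llama_sampler_init_grammar). Rejected candidates get logit == -INFINITY.
static bool token_fits_grammar(struct llama_sampler * grmr, llama_token id) {
    llama_token_data single_token_data = { id, 1.0f, 0.0f };
    llama_token_data_array single_token_data_array = { &single_token_data, 1, -1, false };

    llama_sampler_apply(grmr, &single_token_data_array);

    return single_token_data_array.data[0].logit != -INFINITY;
}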
2 changes: 1 addition & 1 deletion common/sampling.h
@@ -70,7 +70,7 @@ void gpt_sampler_free(struct gpt_sampler * gsmpl);
 
 struct gpt_sampler * gpt_sampler_clone(gpt_sampler * gsmpl);
 
-void gpt_sampler_accept(struct gpt_sampler * gsmpl, llama_token token, bool apply_grammar);
+void gpt_sampler_accept(struct gpt_sampler * gsmpl, llama_token token, bool accept_grammar);
 void gpt_sampler_reset (struct gpt_sampler * gsmpl);
 
 llama_token_data_array * gpt_sampler_get_candidates(struct gpt_sampler * gsmpl);
2 changes: 2 additions & 0 deletions include/llama.h
@@ -1066,6 +1066,7 @@ extern "C" {
     /// @param mu Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal.
     LLAMA_API struct llama_sampler * llama_sampler_init_mirostat(
             const struct llama_model * model,
+                            uint32_t   seed,
                                float   tau,
                                float   eta);
 
@@ -1075,6 +1076,7 @@ extern "C" {
     /// @param eta The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates.
     /// @param mu Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal.
     LLAMA_API struct llama_sampler * llama_sampler_init_mirostat_v2(
+                            uint32_t   seed,
                                float   tau,
                                float   eta);
 
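For context, a hedged sketch of how these seeded entry points slot into a sampler chain. The chain helpers used here (llama_sampler_chain_default_params, llama_sampler_chain_init, llama_sampler_chain_add, llama_sampler_init_temp) come from the same header but are not part of this diff, and the tau/eta values are illustrative:

#include "llama.h"

// Build a chain that samples with mirostat v2. The same seed always
// reproduces the same draws, since the sampler now owns its own RNG.
static struct llama_sampler * make_mirostat_v2_chain(uint32_t seed) {
    struct llama_sampler * chain = llama_sampler_chain_init(llama_sampler_chain_default_params());

    llama_sampler_chain_add(chain, llama_sampler_init_temp(0.8f));                    // temperature first
    llama_sampler_chain_add(chain, llama_sampler_init_mirostat_v2(seed, 5.0f, 0.1f)); // tau = 5.0, eta = 0.1

    return chain;
}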
133 changes: 68 additions & 65 deletions src/llama-sampling.cpp
@@ -11,6 +11,17 @@
 #include <random>
 #include <unordered_map>
 
+static int llama_sample_dist(llama_token_data_array * cur_p, std::mt19937 & rng, std::vector<float> & probs) {
+    probs.resize(cur_p->size);
+    for (size_t i = 0; i < cur_p->size; ++i) {
+        probs[i] = cur_p->data[i].p;
+    }
+
+    std::discrete_distribution<size_t> dist(probs.begin(), probs.end());
+
+    return dist(rng);
+}
+
 static void llama_log_softmax(float * array, size_t size) {
     float max_l = *std::max_element(array, array + size);
     float sum = 0.f;
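The new llama_sample_dist helper draws a candidate index in proportion to the p values, reusing probs as scratch space to avoid a per-call allocation. A standalone illustration of the underlying std::discrete_distribution behavior, with made-up probabilities:

#include <cstdio>
#include <random>
#include <vector>

int main() {
    // Probabilities as they would appear in cur_p->data[i].p after softmax.
    const std::vector<float> probs = { 0.5f, 0.3f, 0.2f };

    std::mt19937 rng(1234); // fixed seed -> reproducible draws

    std::discrete_distribution<size_t> dist(probs.begin(), probs.end());

    int counts[3] = { 0, 0, 0 };
    for (int i = 0; i < 10000; ++i) {
        counts[dist(rng)]++;
    }

    // Expect roughly 5000 / 3000 / 2000.
    printf("%d %d %d\n", counts[0], counts[1], counts[2]);

    return 0;
}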
@@ -456,22 +467,16 @@ struct llama_sampler_context_dist {
     const uint32_t seed;
 
     std::mt19937 rng;
+
+    std::vector<float> probs; // work array
 };
 
 static struct llama_sampler_i llama_sampler_dist_i = {
     /* .name   = */ [](const struct llama_sampler * /*smpl*/) { return "dist"; },
     /* .accept = */ nullptr,
     /* .apply  = */ [](struct llama_sampler * smpl, llama_token_data_array * cur_p) {
         auto * ctx = (llama_sampler_context_dist *) smpl->ctx;
-        std::vector<float> probs;
-        probs.reserve(cur_p->size);
-        for (size_t i = 0; i < cur_p->size; ++i) {
-            probs.push_back(cur_p->data[i].p);
-        }
-
-        std::discrete_distribution<size_t> dist(probs.begin(), probs.end());
-
-        cur_p->selected = dist(ctx->rng);
+        cur_p->selected = llama_sample_dist(cur_p, ctx->rng, ctx->probs);
     },
     /* .reset = */ nullptr,
     /* .clone = */ [](const struct llama_sampler * smpl) {
@@ -489,6 +494,7 @@ struct llama_sampler * llama_sampler_init_dist_impl(uint32_t seed) {
         /* .ctx   = */ new llama_sampler_context_dist {
             /* .seed  = */ seed,
             /* .rng   = */ std::mt19937(seed),
+            /* .probs = */ {},
         },
     };
 }
@@ -761,35 +767,23 @@ struct llama_sampler * llama_sampler_init_temp_ext_impl(float temp, float delta,
 struct llama_sampler_context_mirostat {
     const struct llama_vocab * vocab;
 
+    const uint32_t seed;
+
     const float tau;
     const float eta;
 
     const int32_t m;
 
     float mu;
 
-    std::vector<llama_token_data> cur;
+    std::mt19937 rng;
+
+    std::vector<float> probs;
 };
 
 static struct llama_sampler_i llama_sampler_mirostat_i = {
     /* .name   = */ [](const struct llama_sampler * /*smpl*/) { return "mirostat"; },
-    /* .accept = */ [](struct llama_sampler * smpl, llama_token token) {
-        auto * ctx = (llama_sampler_context_mirostat *) smpl->ctx;
-
-        int32_t idx = -1;
-        for (size_t i = 0; i < ctx->cur.size(); ++i) {
-            if (ctx->cur[i].id == token) {
-                idx = i;
-                break;
-            }
-        }
-
-        float observed_surprise = -log2f(ctx->cur[idx].p);
-        float e = observed_surprise - ctx->tau;
-
-        // Update mu using the learning rate and error
-        ctx->mu = ctx->mu - ctx->eta * e;
-    },
+    /* .accept = */ nullptr,
     /* .apply  = */ [](struct llama_sampler * smpl, llama_token_data_array * cur_p) {
         auto * ctx = (llama_sampler_context_mirostat *) smpl->ctx;
 
@@ -812,70 +806,66 @@ static struct llama_sampler_i llama_sampler_mirostat_i = {
         float k = powf((epsilon_hat * powf(2, ctx->mu)) / (1 - powf(ctx->vocab->n_vocab, -epsilon_hat)), 1 / s_hat);
 
         llama_sampler_top_k_impl(cur_p, std::max(int(k), 1));
+        llama_sampler_softmax_impl(cur_p);
 
-        // remember the order to be able to compute the distance later when accepting the token
-        ctx->cur.resize(cur_p->size);
-        for (size_t i = 0; i < cur_p->size; ++i) {
-            ctx->cur[i] = cur_p->data[i];
-        }
+        const int idx = llama_sample_dist(cur_p, ctx->rng, ctx->probs);
+
+        cur_p->selected = idx;
+
+        float observed_surprise = -log2f(cur_p->data[idx].p);
+        float e = observed_surprise - ctx->tau;
+
+        // Update mu using the learning rate and error
+        ctx->mu = ctx->mu - ctx->eta * e;
     },
     /* .reset = */ [](struct llama_sampler * smpl) {
         auto * ctx = (llama_sampler_context_mirostat *) smpl->ctx;
         ctx->mu = 2.0f*ctx->tau;
+        ctx->rng = std::mt19937(ctx->seed);
     },
     /* .clone = */ [](const struct llama_sampler * smpl) {
         const auto * ctx = (const llama_sampler_context_mirostat *) smpl->ctx;
-        return llama_sampler_init_mirostat_impl(*ctx->vocab, ctx->tau, ctx->eta, ctx->m);
+        return llama_sampler_init_mirostat_impl(*ctx->vocab, ctx->seed, ctx->tau, ctx->eta, ctx->m);
     },
     /* .free = */ [](struct llama_sampler * smpl) {
         delete (llama_sampler_context_mirostat *) smpl->ctx;
     },
 };
 
-struct llama_sampler * llama_sampler_init_mirostat_impl(const struct llama_vocab & vocab, float tau, float eta, int32_t m) {
+struct llama_sampler * llama_sampler_init_mirostat_impl(const struct llama_vocab & vocab, uint32_t seed, float tau, float eta, int32_t m) {
     return new llama_sampler {
         /* .iface = */ &llama_sampler_mirostat_i,
         /* .ctx   = */ new llama_sampler_context_mirostat {
             /* .vocab = */ &vocab,
+            /* .seed  = */ seed,
             /* .tau   = */ tau,
             /* .eta   = */ eta,
             /* .m     = */ m,
             /* .mu    = */ 2.0f*tau,
-            /* .cur   = */ {},
+            /* .rng   = */ std::mt19937(seed),
+            /* .probs = */ {},
         },
     };
 }

 // mirostat v2
 
 struct llama_sampler_context_mirostat_v2 {
+    const uint32_t seed;
+
     const float tau;
     const float eta;
 
     float mu;
 
-    std::vector<llama_token_data> cur;
+    std::mt19937 rng;
+
+    std::vector<float> probs;
 };
 
 static struct llama_sampler_i llama_sampler_mirostat_v2_i = {
     /* .name   = */ [](const struct llama_sampler * /*smpl*/) { return "mirostat-v2"; },
-    /* .accept = */ [](struct llama_sampler * smpl, llama_token token) {
-        auto * ctx = (llama_sampler_context_mirostat_v2 *) smpl->ctx;
-
-        int32_t idx = -1;
-        for (size_t i = 0; i < ctx->cur.size(); ++i) {
-            if (ctx->cur[i].id == token) {
-                idx = i;
-                break;
-            }
-        }
-
-        float observed_surprise = -log2f(ctx->cur[idx].p);
-        float e = observed_surprise - ctx->tau;
-
-        // Update mu using the learning rate and error
-        ctx->mu = ctx->mu - ctx->eta * e;
-    },
+    /* .accept = */ nullptr,
     /* .apply  = */ [](struct llama_sampler * smpl, llama_token_data_array * cur_p) {
         auto * ctx = (llama_sampler_context_mirostat_v2 *) smpl->ctx;
 
@@ -893,33 +883,40 @@ static struct llama_sampler_i llama_sampler_mirostat_v2_i = {
         // Normalize the probabilities of the remaining words
         llama_sampler_softmax_impl(cur_p);
 
-        // remember the order to be able to compute the distance later when accepting the token
-        ctx->cur.resize(cur_p->size);
-        for (size_t i = 0; i < cur_p->size; ++i) {
-            ctx->cur[i] = cur_p->data[i];
-        }
+        const int idx = llama_sample_dist(cur_p, ctx->rng, ctx->probs);
+
+        cur_p->selected = idx;
+
+        float observed_surprise = -log2f(cur_p->data[idx].p);
+        float e = observed_surprise - ctx->tau;
+
+        // Update mu using the learning rate and error
+        ctx->mu = ctx->mu - ctx->eta * e;
     },
     /* .reset = */ [](struct llama_sampler * smpl) {
         auto * ctx = (llama_sampler_context_mirostat_v2 *) smpl->ctx;
         ctx->mu = 2.0f*ctx->tau;
+        ctx->rng = std::mt19937(ctx->seed);
     },
     /* .clone = */ [](const struct llama_sampler * smpl) {
         const auto * ctx = (const llama_sampler_context_mirostat_v2 *) smpl->ctx;
-        return llama_sampler_init_mirostat_v2_impl(ctx->tau, ctx->eta);
+        return llama_sampler_init_mirostat_v2_impl(ctx->seed, ctx->tau, ctx->eta);
     },
     /* .free = */ [](struct llama_sampler * smpl) {
         delete (llama_sampler_context_mirostat_v2 *) smpl->ctx;
     },
 };
 
-struct llama_sampler * llama_sampler_init_mirostat_v2_impl(float tau, float eta) {
+struct llama_sampler * llama_sampler_init_mirostat_v2_impl(uint32_t seed, float tau, float eta) {
     return new llama_sampler {
         /* .iface = */ &llama_sampler_mirostat_v2_i,
         /* .ctx   = */ new llama_sampler_context_mirostat_v2 {
-            /* .tau = */ tau,
-            /* .eta = */ eta,
-            /* .mu  = */ 2.0f*tau,
-            /* .cur = */ {},
+            /* .seed  = */ seed,
+            /* .tau   = */ tau,
+            /* .eta   = */ eta,
+            /* .mu    = */ 2.0f*tau,
+            /* .rng   = */ std::mt19937(seed),
+            /* .probs = */ {},
        },
     };
 }
@@ -1154,9 +1151,15 @@ struct llama_sampler * llama_sampler_init_logit_bias_impl(
 
 static struct llama_sampler_i llama_sampler_chain_i = {
     /* .name   = */ [](const struct llama_sampler * /*smpl*/) { return "chain"; },
-    /* .accept = */ [](struct llama_sampler * smpl, llama_token /*token*/) {
+    /* .accept = */ [](struct llama_sampler * smpl, llama_token token) {
         auto * chain = (llama_sampler_chain *) smpl->ctx;
 
+        time_meas tm(chain->t_sample_us, chain->params.no_timing);
+
+        for (auto * smpl : chain->samplers) {
+            llama_sampler_accept_impl(*smpl, token);
+        }
+
         chain->n_sample++;
     },
     /* .apply = */ [](struct llama_sampler * smpl, llama_token_data_array * cur_p) {
2 changes: 2 additions & 0 deletions src/llama-sampling.h
@@ -58,11 +58,13 @@ struct llama_sampler * llama_sampler_init_temp_ext_impl (float t, float delta
 
 struct llama_sampler * llama_sampler_init_mirostat_impl(
         const struct llama_vocab & vocab,
+                        uint32_t   seed,
                            float   tau,
                            float   eta,
                          int32_t   m);
 
 struct llama_sampler * llama_sampler_init_mirostat_v2_impl(
+                        uint32_t   seed,
                            float   tau,
                            float   eta);
 
8 changes: 4 additions & 4 deletions src/llama.cpp
@@ -20646,12 +20646,12 @@ struct llama_sampler * llama_sampler_init_temp_ext(float temp, float delta, floa
     return llama_sampler_init_temp_ext_impl(temp, delta, exponent);
 }
 
-struct llama_sampler * llama_sampler_init_mirostat(const struct llama_model * model, float tau, float eta) {
-    return llama_sampler_init_mirostat_impl(model->vocab, tau, eta, 100);
+struct llama_sampler * llama_sampler_init_mirostat(const struct llama_model * model, uint32_t seed, float tau, float eta) {
+    return llama_sampler_init_mirostat_impl(model->vocab, seed, tau, eta, 100);
 }
 
-struct llama_sampler * llama_sampler_init_mirostat_v2(float tau, float eta) {
-    return llama_sampler_init_mirostat_v2_impl(tau, eta);
+struct llama_sampler * llama_sampler_init_mirostat_v2(uint32_t seed, float tau, float eta) {
+    return llama_sampler_init_mirostat_v2_impl(seed, tau, eta);
 }
 
 struct llama_sampler * llama_sampler_init_grammar(const struct llama_model * model, const char * grammar_str, const char * grammar_root) {
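Putting the pieces together, a hedged sketch of one sampling step against the updated public API. The logits pointer and n_vocab are assumed to come from a prior llama_decode, and the helper name is ours. It shows the two conventions this commit settles on: selected starts at -1 until a sampler picks a token, and accepting through the chain forwards the token to every sampler it contains:

#include <cassert>
#include <vector>
#include "llama.h"

static llama_token sample_step(struct llama_sampler * chain, const float * logits, int n_vocab) {
    std::vector<llama_token_data> cur;
    cur.reserve(n_vocab);
    for (llama_token token_id = 0; token_id < n_vocab; ++token_id) {
        cur.push_back({ token_id, logits[token_id], 0.0f });
    }

    // selected stays -1 until a sampler in the chain picks a token
    llama_token_data_array cur_p = { cur.data(), cur.size(), -1, false };

    llama_sampler_apply(chain, &cur_p);
    assert(cur_p.selected != -1 && "no selected token during sampling");

    const llama_token id = cur_p.data[cur_p.selected].id;

    // the chain's accept now propagates to every sampler it contains
    llama_sampler_accept(chain, id);

    return id;
}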
