ppl : fix n_seq_max for perplexity (ggerganov#8277)
* ppl : fix n_seq_max for perplexity

* use 1 seq for kl_divergence
slaren authored Jul 3, 2024
1 parent 916248a commit 5f2d4e6
Showing 1 changed file with 6 additions and 3 deletions.
examples/perplexity/perplexity.cpp: 6 additions & 3 deletions
@@ -1991,6 +1991,12 @@ int main(int argc, char ** argv) {
         params.n_batch = std::min(params.n_batch, n_kv);
     } else {
         params.n_batch = std::min(params.n_batch, params.n_ctx);
+        if (params.kl_divergence) {
+            params.n_parallel = 1;
+        } else {
+            // ensure there's at least enough seq_ids for HellaSwag
+            params.n_parallel = std::max(4, params.n_parallel);
+        }
     }
 
     if (params.ppl_stride > 0) {
@@ -2015,9 +2021,6 @@
     llama_model * model;
     llama_context * ctx;
 
-    // ensure there's at least enough seq_ids for HellaSwag
-    params.n_parallel = std::max(4, params.n_parallel);
-
     // load the model and apply lora adapter, if any
     std::tie(model, ctx) = llama_init_from_gpt_params(params);
     if (model == NULL) {
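Net effect: the unconditional std::max(4, params.n_parallel) used to run just before the model was created, overriding whatever sequence count the perplexity path had already computed (n_parallel becomes the context's n_seq_max, per the commit title). After the fix, the bump applies only to the non-perplexity tasks, and KL-divergence is pinned to a single sequence. Below is a minimal standalone sketch of the new decision logic; params_t is a hypothetical stand-in for llama.cpp's gpt_params (the two field names mirror the diff, but the struct itself is illustrative, not the real definition):

#include <algorithm>
#include <cstdio>

// Hypothetical stand-in for llama.cpp's gpt_params; only the fields
// touched by this commit are modeled.
struct params_t {
    int  n_parallel    = 1;     // later passed through as the context's n_seq_max
    bool kl_divergence = false; // set for a KL-divergence run
};

// Mirrors the logic added by the commit: KL-divergence evaluates one
// sequence at a time, while HellaSwag scores 4 endings in parallel and
// therefore needs at least 4 sequence ids.
static void adjust_n_parallel(params_t & params) {
    if (params.kl_divergence) {
        params.n_parallel = 1;
    } else {
        // ensure there's at least enough seq_ids for HellaSwag
        params.n_parallel = std::max(4, params.n_parallel);
    }
}

int main() {
    params_t hellaswag;                 // e.g. a HellaSwag run
    adjust_n_parallel(hellaswag);
    std::printf("hellaswag:     n_parallel = %d\n", hellaswag.n_parallel); // 4

    params_t kld;
    kld.kl_divergence = true;           // a KL-divergence run
    adjust_n_parallel(kld);
    std::printf("kl_divergence: n_parallel = %d\n", kld.n_parallel);       // 1

    return 0;
}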
