llm_load_print_meta model params adjusted to display correct suffix
mofosyne committed Jan 9, 2024
1 parent 1fc2f26 commit 16e9463
Showing 1 changed file with 13 additions and 1 deletion.
llama.cpp
@@ -3145,7 +3145,19 @@ static void llm_load_print_meta(llama_model_loader & ml, llama_model & model) {
     LLAMA_LOG_INFO("%s: rope_finetuned   = %s\n",     __func__, hparams.rope_finetuned ? "yes" : "unknown");
     LLAMA_LOG_INFO("%s: model type       = %s\n",     __func__, llama_model_type_name(model.type));
     LLAMA_LOG_INFO("%s: model ftype      = %s\n",     __func__, llama_model_ftype_name(model.ftype).c_str());
-    LLAMA_LOG_INFO("%s: model params     = %.2f B\n", __func__, ml.n_elements*1e-9);
+    if (ml.n_elements > 1e12) {
+        // Trillions Of Parameters
+        LLAMA_LOG_INFO("%s: model params     = %.2f T\n", __func__, ml.n_elements*1e-12);
+    } else if (ml.n_elements > 1e9) {
+        // Billions Of Parameters
+        LLAMA_LOG_INFO("%s: model params     = %.2f B\n", __func__, ml.n_elements*1e-9);
+    } else if (ml.n_elements > 1e6) {
+        // Millions Of Parameters
+        LLAMA_LOG_INFO("%s: model params     = %.2f M\n", __func__, ml.n_elements*1e-6);
+    } else {
+        // Thousands Of Parameters
+        LLAMA_LOG_INFO("%s: model params     = %.2f K\n", __func__, ml.n_elements*1e-3);
+    }
     if (ml.n_bytes < GiB) {
         LLAMA_LOG_INFO("%s: model size       = %.2f MiB (%.2f BPW) \n", __func__, ml.n_bytes/1024.0/1024.0, ml.n_bytes*8.0/ml.n_elements);
     } else {
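For reference, the threshold logic introduced above can be exercised in isolation. The sketch below is illustrative only: print_params is a hypothetical helper, plain printf stands in for LLAMA_LOG_INFO, and the element counts are made-up example values rather than counts from any real model file.

    // Minimal standalone sketch of the suffix-selection logic in this commit.
    // print_params is a hypothetical helper, not part of llama.cpp.
    #include <cstdio>
    #include <cstdint>

    static void print_params(uint64_t n_elements) {
        if (n_elements > 1e12) {
            printf("model params = %.2f T\n", n_elements*1e-12); // trillions
        } else if (n_elements > 1e9) {
            printf("model params = %.2f B\n", n_elements*1e-9);  // billions
        } else if (n_elements > 1e6) {
            printf("model params = %.2f M\n", n_elements*1e-6);  // millions
        } else {
            printf("model params = %.2f K\n", n_elements*1e-3);  // thousands
        }
    }

    int main() {
        print_params(7241732096ULL); // example 7B-class count  -> "model params = 7.24 B"
        print_params(124439808ULL);  // example sub-1B count    -> "model params = 124.44 M"
        print_params(60000ULL);      // example tiny count      -> "model params = 60.00 K"
        return 0;
    }

Note that because the comparisons are strict (>), a count of exactly 1e9 elements falls into the M branch and prints as 1000.00 M; the committed code shares this edge-case behavior.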
