Skip to content

Commit

Permalink
Don't crash on ftype (formerly f16) == 4 (ggerganov#917)
Browse files · Browse the repository at this point in the history
  • Loading branch information
sw authored Apr 12, 2023
1 parent f76cb3a commit e7f6997
Show file tree
Hide file tree
Showing 2 changed files with 4 additions and 1 deletion.
4 changes: 3 additions & 1 deletion llama.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -827,7 +827,9 @@ static const char *llama_ftype_name(enum llama_ftype ftype) {
case LLAMA_FTYPE_MOSTLY_F16: return "mostly F16";
case LLAMA_FTYPE_MOSTLY_Q4_0: return "mostly Q4_0";
case LLAMA_FTYPE_MOSTLY_Q4_1: return "mostly Q4_1";
default: LLAMA_ASSERT(false);
case LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16:
return "mostly Q4_1, some F16";
default: return "unknown, may not work";
}
}

Expand Down
1 change: 1 addition & 0 deletions llama.h
Original file line number Diff line number Diff line change
Expand Up @@ -71,6 +71,7 @@ extern "C" {
LLAMA_FTYPE_MOSTLY_F16 = 1, // except 1d tensors
LLAMA_FTYPE_MOSTLY_Q4_0 = 2, // except 1d tensors
LLAMA_FTYPE_MOSTLY_Q4_1 = 3, // except 1d tensors
LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16 = 4, // tok_embeddings.weight and output.weight are F16
};

LLAMA_API struct llama_context_params llama_context_default_params();
Expand Down

0 comments on commit e7f6997

Please sign in to comment.