Fix resource leaks #6061

Merged: 1 commit, Mar 14, 2024
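The changes below all follow the same shape: a function acquires a resource up front (a gguf context from `gguf_init_from_file`, or a `FILE *`), then returns early on a failure path without releasing it. The fix is to free the resource before each early return. A minimal, self-contained sketch of that pattern, using stub types in place of the real ggml gguf API so it compiles on its own (`gguf_context_stub`, `stub_init`, `stub_free`, and `read_tensors` are illustrative names, not code from this PR):

```cpp
#include <cstdio>
#include <cstdlib>

// Stand-in for ggml's gguf context; the real one is created by gguf_init_from_file.
struct gguf_context_stub { int n_tensors; };

static gguf_context_stub * stub_init() {
    return (gguf_context_stub *) calloc(1, sizeof(gguf_context_stub));
}
static void stub_free(gguf_context_stub * ctx) {
    free(ctx);
}

static bool read_tensors(const char * fname) {
    gguf_context_stub * ctx = stub_init();
    if (!ctx) {
        return false;                 // nothing acquired yet, nothing to free
    }
    FILE * f = fopen(fname, "rb");
    if (!f) {
        stub_free(ctx);               // the kind of leak this PR fixes: free before the early return
        return false;
    }
    // ... validate tensor data; every failing check must free ctx and close f before returning ...
    fclose(f);
    stub_free(ctx);
    return true;
}

int main(int argc, char ** argv) {
    return (argc > 1 && read_tensors(argv[1])) ? 0 : 1;
}
```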
1 change: 1 addition & 0 deletions examples/gguf/gguf.cpp
@@ -211,6 +211,7 @@ static bool gguf_ex_read_1(const std::string & fname) {
for (int j = 0; j < ggml_nelements(cur); ++j) {
if (data[j] != 100 + i) {
fprintf(stderr, "%s: tensor[%d]: data[%d] = %f\n", __func__, i, j, data[j]);
gguf_free(ctx);
return false;
}
}
1 change: 1 addition & 0 deletions examples/llama-bench/llama-bench.cpp
@@ -103,6 +103,7 @@ static std::string get_cpu_info() {
}
}
}
fclose(f);
}
#endif
// TODO: other platforms
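As a usage note, not something this PR does: for plain `FILE` handles the same guarantee can come from a scope-bound deleter, so every return path closes the file without a hand-written `fclose` before each `return`. A minimal sketch using only the standard library; `read_first_line` and the `/proc/cpuinfo` path are illustrative, not code from llama-bench.cpp:

```cpp
#include <cstdio>
#include <memory>
#include <string>

static std::string read_first_line(const char * path) {
    // unique_ptr with fclose as the deleter: the handle is closed on every return path.
    std::unique_ptr<FILE, int (*)(FILE *)> f(fopen(path, "r"), &fclose);
    if (!f) {
        return "";                    // open failed, nothing to close
    }
    char buf[256];
    if (!fgets(buf, sizeof(buf), f.get())) {
        return "";                    // f is closed automatically here
    }
    return std::string(buf);          // ...and here
}

int main() {
    printf("%s", read_first_line("/proc/cpuinfo").c_str());
}
```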
4 changes: 4 additions & 0 deletions examples/llava/clip.cpp
@@ -995,13 +995,15 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
if (!new_clip->ctx_data) {
fprintf(stderr, "%s: ggml_init() failed\n", __func__);
clip_free(new_clip);
gguf_free(ctx);
return nullptr;
}

auto fin = std::ifstream(fname, std::ios::binary);
if (!fin) {
printf("cannot open model file for loading tensors\n");
clip_free(new_clip);
gguf_free(ctx);
return nullptr;
}

@@ -1023,6 +1025,7 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
if (!fin) {
printf("%s: failed to seek for tensor %s\n", __func__, name);
clip_free(new_clip);
gguf_free(ctx);
return nullptr;
}
int num_bytes = ggml_nbytes(cur);
@@ -1908,6 +1911,7 @@ bool clip_model_quantize(const char * fname_inp, const char * fname_out, const i
break;
default:
printf("Please use an input file in f32 or f16\n");
gguf_free(ctx_out);
return false;
}

@@ -711,6 +711,7 @@ static bool load_checkpoint_file(const char * filename, struct my_llama_model *

load_checkpoint_gguf(fctx, f_ggml_ctx, model, train);

gguf_free(fctx);
return true;
}
