
Commit 4566863
finetune : keep allocs alive until all allocations are done (ggergano…
slaren authored Dec 17, 2023
1 parent 0ffc92d commit 4566863
Showing 1 changed file with 7 additions and 8 deletions.
15 changes: 7 additions & 8 deletions examples/finetune/finetune.cpp
@@ -1620,8 +1620,6 @@ int main(int argc, char ** argv) {
     opt->params.adam.gclip = params.common.adam_gclip;
     opt->params.adam.eps_f = params.common.adam_eps_f;
 
-    ggml_allocr * alloc = NULL;
-
     printf("%s: init model\n", __func__);
     bool existed = load_checkpoint_lora_file(params.common.fn_checkpoint_in, &model, &lora, train);
 
@@ -1725,10 +1723,9 @@ int main(int argc, char ** argv) {
 
     // allocate input tensors
     mem_input_data.resize(max_input_size);
-    alloc = ggml_allocr_new(mem_input_data.data(), mem_input_data.size(), tensor_alignment);
-    ggml_allocr_alloc(alloc, tokens_input);
-    ggml_allocr_alloc(alloc, target_probs);
-    ggml_allocr_free(alloc);
+    ggml_allocr_t alloc_inps = ggml_allocr_new(mem_input_data.data(), mem_input_data.size(), tensor_alignment);
+    ggml_allocr_alloc(alloc_inps, tokens_input);
+    ggml_allocr_alloc(alloc_inps, target_probs);
 
     // context for compute tensors without their data
     const size_t estimated_compute_size_wo_data = (
@@ -1755,7 +1752,7 @@ int main(int argc, char ** argv) {
     // find best evaluation order
     for (unsigned order = 0; order < (unsigned) GGML_CGRAPH_EVAL_ORDER_COUNT; ++order) {
         ctx_compute = ggml_init(ctx_compute_params);
-        alloc = ggml_allocr_new_measure(tensor_alignment);
+        ggml_allocr_t alloc = ggml_allocr_new_measure(tensor_alignment);
         gf = ggml_new_graph_custom(ctx_compute, LLAMA_TRAIN_MAX_NODES, true);
         gf->order = (enum ggml_cgraph_eval_order) order;
         gb = ggml_new_graph_custom(ctx_compute, LLAMA_TRAIN_MAX_NODES, true);
@@ -1788,7 +1785,7 @@ int main(int argc, char ** argv) {
     // allocate compute tensors
     mem_compute_data.resize(max_compute_size);
     ctx_compute = ggml_init(ctx_compute_params);
-    alloc = ggml_allocr_new(mem_compute_data.data(), mem_compute_data.size(), tensor_alignment);
+    ggml_allocr_t alloc = ggml_allocr_new(mem_compute_data.data(), mem_compute_data.size(), tensor_alignment);
     gf = ggml_new_graph_custom(ctx_compute, LLAMA_TRAIN_MAX_NODES, true);
     gf->order = best_order;
     gb = ggml_new_graph_custom(ctx_compute, LLAMA_TRAIN_MAX_NODES, true);
@@ -1804,6 +1801,8 @@ int main(int argc, char ** argv) {
         params.common.use_checkpointing
     );
     ggml_allocr_free(alloc);
+    ggml_allocr_free(alloc_inps);
+
 
     // tokenize data
     std::vector<llama_token> train_tokens;
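
What the change amounts to: the old code reused a single `alloc` pointer and freed it immediately after placing the input tensors, even though later steps still allocate compute tensors; the commit splits it into `alloc_inps` plus a local `alloc`, and defers both `ggml_allocr_free` calls until every allocation is done. Below is a minimal sketch of that ordering, using the `ggml_allocr` API from ggml-alloc.h of this vintage (`ggml_allocr_new`, `ggml_allocr_alloc`, `ggml_allocr_free`); the function name, tensor parameters, and buffer sizes are made up for illustration, and finetune itself allocates the compute graph through further helpers that this sketch omits:

    // Sketch: create all allocators first, free them only after every
    // allocation has been made, so no tensor is left referring to an
    // allocator that has already been destroyed.
    #include "ggml.h"
    #include "ggml-alloc.h"
    #include <cstdint>
    #include <vector>

    // hypothetical helper; tensor arguments stand in for finetune's
    // tokens_input / target_probs and its compute-graph tensors
    static void allocate_all(struct ggml_tensor * tokens_input,
                             struct ggml_tensor * target_probs,
                             struct ggml_tensor * compute_tensor) {
        const size_t tensor_alignment = 32;
        // made-up sizes; in finetune these buffers outlive the training loop
        std::vector<uint8_t> mem_input_data(1u << 20);
        std::vector<uint8_t> mem_compute_data(1u << 24);

        // allocator for the input tensors
        ggml_allocr_t alloc_inps = ggml_allocr_new(mem_input_data.data(), mem_input_data.size(), tensor_alignment);
        ggml_allocr_alloc(alloc_inps, tokens_input);
        ggml_allocr_alloc(alloc_inps, target_probs);
        // NOTE: no ggml_allocr_free(alloc_inps) here -- freeing at this
        // point, as the old code did, is what the commit removes

        // allocator for the compute tensors
        ggml_allocr_t alloc = ggml_allocr_new(mem_compute_data.data(), mem_compute_data.size(), tensor_alignment);
        ggml_allocr_alloc(alloc, compute_tensor);

        // all allocations are done; only now free both allocators
        ggml_allocr_free(alloc);
        ggml_allocr_free(alloc_inps);
    }

The commit title only states the rule ("keep allocs alive until all allocations are done"); presumably freeing an allocator while later allocation passes still run over tensors it placed risks touching freed allocator state, which the deferred frees avoid.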
