llm_finetuning.py
# -*- coding: utf-8 -*-
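# QLoRA-style fine-tuning of a causal LM for ORKG synthesis generation:
# the base model is loaded in 4-bit, wrapped with LoRA adapters, and trained
# with TRL's SFTTrainer on prompts built by SynthesisDatasetBuilder.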
from scripts import io
from scripts.dataset import SynthesisDatasetBuilder
from scripts.configs import BaseConfig
import transformers
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
import bitsandbytes as bnb
from dotenv import find_dotenv, load_dotenv
import os
from peft import LoraConfig, prepare_model_for_kbit_training, get_peft_model
from trl import SFTTrainer
_ = load_dotenv(find_dotenv())  # read HUGGINGFACE_ACCESS_TOKEN (and other settings) from a local .env file

def find_all_linear_names(model):
    """Collect the names of all 4-bit linear layers, to be used as LoRA target modules."""
    # bnb.nn.Linear4bit if args.bits == 4 else (bnb.nn.Linear8bitLt if args.bits == 8 else torch.nn.Linear)
    cls = bnb.nn.Linear4bit
    lora_module_names = set()
    for name, module in model.named_modules():
        if isinstance(module, cls):
            names = name.split('.')
            lora_module_names.add(names[0] if len(names) == 1 else names[-1])
    if 'lm_head' in lora_module_names:  # needed for 16-bit
        lora_module_names.remove('lm_head')
    return list(lora_module_names)

if __name__ == '__main__':
    access_token = os.environ['HUGGINGFACE_ACCESS_TOKEN']
    args = BaseConfig().get_args()
    print("args.llm_warmup_dir:", args.llm_warmup_dir)
    print("args.base_model_id:", args.base_model_id)
    print("---" * 30)

    df = io.read_csv(args.orkg_synthesis_train_llm)
    print("size of the dataset is:", df.shape[0])
    print(df.columns)
    dataset_builder = SynthesisDatasetBuilder(df=df,
                                              prompt_template=args.synthesis_prompt_template,
                                              synthesis_type_dict=args.synthesis_type_dict)
    train_data = dataset_builder.orkg_synthesis_llm(is_llama=args.is_llama)
    print("dataset size:", len(train_data))
    bnb_config = BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_use_double_quant=True,
        bnb_4bit_quant_type="nf4",
        bnb_4bit_compute_dtype=torch.bfloat16
    )
    model = AutoModelForCausalLM.from_pretrained(args.base_model_id, quantization_config=bnb_config,
                                                 device_map={"": 0}, token=access_token)
    padding_side = 'left' if args.is_llama else "right"
    tokenizer = AutoTokenizer.from_pretrained(args.base_model_id, add_eos_token=True,
                                              padding_side=padding_side, token=access_token)
    model.gradient_checkpointing_enable()
    model = prepare_model_for_kbit_training(model)

    modules = find_all_linear_names(model)
    print('linear layers for fine tuning:', modules)
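    # Attach LoRA adapters (rank 8, alpha 32) to every 4-bit linear layer found above;
    # only the adapter weights are trained while the quantized base model stays frozen.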
    lora_config = LoraConfig(
        r=8,
        lora_alpha=32,
        target_modules=modules,
        lora_dropout=0.05,
        bias="none",
        task_type="CAUSAL_LM"
    )
    model = get_peft_model(model, lora_config)
    trainable, total = model.get_nb_trainable_parameters()
    print(f"Trainable: {trainable} | total: {total} | Percentage: {trainable / total * 100:.4f}%")

    tokenizer.pad_token = tokenizer.eos_token
    torch.cuda.empty_cache()
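    # Supervised fine-tuning on the "prompt-template" text field with a causal-LM collator
    # (mlm=False); paged 8-bit AdamW and gradient accumulation keep GPU memory usage low.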
    trainer = SFTTrainer(
        model=model,
        train_dataset=train_data,
        dataset_text_field="prompt-template",
        peft_config=lora_config,
        args=transformers.TrainingArguments(
            per_device_train_batch_size=1,
            gradient_accumulation_steps=4,
            warmup_ratio=0.03,  # warmup_steps expects an int; express the intended 3% warmup as a ratio
            learning_rate=2e-4,
            logging_steps=1,
            output_dir=args.llm_warmup_dir,
            optim="paged_adamw_8bit",
            save_strategy="epoch",
            num_train_epochs=args.llm_num_train_epochs
        ),
        max_seq_length=args.max_token_len,
        data_collator=transformers.DataCollatorForLanguageModeling(tokenizer, mlm=False),
    )
    model.config.use_cache = False  # silence the warnings. Please re-enable for inference!
    trainer.train()
    trainer.model.save_pretrained(args.llm_warmup_dir)
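
    # Minimal sketch (not executed here) of reloading the saved adapter for inference;
    # it assumes the same base_model_id and the adapter directory written just above.
    #
    #   from peft import PeftModel
    #   base = AutoModelForCausalLM.from_pretrained(args.base_model_id, quantization_config=bnb_config,
    #                                               device_map={"": 0}, token=access_token)
    #   model = PeftModel.from_pretrained(base, args.llm_warmup_dir)
    #   model.config.use_cache = True  # re-enable the KV cache for generation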