model-train.sh
#!/bin/bash
# Parse command-line options.
while getopts 'n:j:s:p:f:x:y:' OPTION; do
  case "$OPTION" in
    n) JOB_NAME=$OPTARG ;;     # run name; used to build the checkpoint directory
    j) JBU_CKPT=$OPTARG ;;     # path to the JBU upsampler checkpoint
    s) SCALE=$OPTARG ;;        # value passed to --feature_scale_mask
    p) PRETRAIN_LR=$OPTARG ;;  # pretrain learning rate
    f) FINETUNE_LR=$OPTARG ;;  # finetune learning rate
    x) PRBATCH=$OPTARG ;;      # pretrain per-device train batch size
    y) FTRBATCH=$OPTARG ;;     # finetune per-device train batch size
    ?)
      echo "Invalid option: -$OPTARG"
      exit 1
      ;;
  esac
done
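
# Example invocation (all values are illustrative, not prescribed by this script):
#   bash model-train.sh -n my-uhd-run -j ./pretrained_models/jbu_upsampler.pt \
#     -s 0 -p 1e-3 -f 2e-5 -x 32 -y 8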
echo "JOB_NAME: $JOB_NAME"
echo "JBU_CKPT: $JBU_CKPT"
echo "SCALE: $SCALE"
echo "PRETRAIN_LR: $PRETRAIN_LR"
echo "FINETUNE_LR: $FINETUNE_LR"
echo "PRBATCH: $PRBATCH"
echo "FTRBATCH: $FTRBATCH"
wandb offline
CKPT=llava-uhd-144-7b
OUTPUT_DIR=/data/checkpoints/$JOB_NAME/checkpoints_new/$CKPT
mkdir -p $OUTPUT_DIR
LLM_CKPT_DIR=./pretrained_models/vicuna-7b-v1.5
CLIP_CKPT_DIR=./pretrained_models/clip-vit-large-patch14-336
echo "OUTPUT_DIR: $OUTPUT_DIR"
# Distributed launch settings: default to a single node with 8 GPUs unless
# overridden by the environment.
GPUS_PER_NODE=${GPUS_PER_NODE:-8}
WORLD_SIZE=${WORLD_SIZE:-1}
RANK=${RANK:-0}
MASTER_ADDR=${MASTER_ADDR:-"localhost"}
MASTER_PORT=${MASTER_PORT:-12345}
DISTRIBUTED_ARGS="
    --nproc_per_node $GPUS_PER_NODE \
    --nnodes $WORLD_SIZE \
    --node_rank $RANK \
    --master_addr $MASTER_ADDR \
    --master_port $MASTER_PORT "
echo $DISTRIBUTED_ARGS
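
# Example multi-node launch (illustrative; the IP and node count are assumptions):
#   node 0: WORLD_SIZE=2 RANK=0 MASTER_ADDR=10.0.0.1 bash model-train.sh ...
#   node 1: WORLD_SIZE=2 RANK=1 MASTER_ADDR=10.0.0.1 bash model-train.sh ...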
# Pretrain stage: target total batch size == 256.
# Derive gradient accumulation steps so that
# GPUS_PER_NODE * WORLD_SIZE * PRBATCH * ACCU_STEPS == 256
# (assumes PRBATCH divides the total batch size evenly).
ACCU_STEPS=$(( 256 / (GPUS_PER_NODE * WORLD_SIZE * PRBATCH) ))
torchrun $DISTRIBUTED_ARGS llava/train/train_mem.py \
--deepspeed $PWD/scripts/zero2.json \
--model_name_or_path $LLM_CKPT_DIR \
--version plain \
--feature_mode 'featup_muti_res' \
--data_path $PWD/playground/data/LLaVA-Pretrain/blip_laion_cc_sbu_558k.json \
--image_folder $PWD/playground/data/LLaVA-Pretrain/images \
--vision_tower $CLIP_CKPT_DIR \
--mm_projector_type adapt_spatial_resampler \
--tune_mm_mlp_adapter True \
--mm_vision_select_layer -2 \
--mm_use_im_start_end False \
--mm_use_im_patch_token False \
--bf16 True \
--output_dir $OUTPUT_DIR \
--num_train_epochs 1 \
--per_device_train_batch_size $PRBATCH \
--per_device_eval_batch_size 4 \
--gradient_accumulation_steps $ACCU_STEPS \
--evaluation_strategy "no" \
--save_strategy "steps" \
--save_steps 24000 \
--save_total_limit 1 \
--learning_rate ${PRETRAIN_LR:-1e-3} \
--weight_decay 0. \
--warmup_ratio 0.03 \
--lr_scheduler_type "cosine" \
--logging_steps 1 \
--tf32 True \
--model_max_length 3072 \
--gradient_checkpointing True \
--dataloader_num_workers 4 \
--lazy_preprocess True \
--report_to wandb \
--single True \
--jbu_ckpt $JBU_CKPT \
--feature_scale_mask $SCALE \
--sft_encoder False
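
# Guard before finetuning (a minimal check, assuming the pretrain stage writes
# its projector weights to $OUTPUT_DIR as the --pretrain_mm_mlp_adapter flag
# below expects); fail fast rather than start a doomed finetune run.
if [ ! -f "$OUTPUT_DIR/mm_projector.bin" ]; then
  echo "Error: $OUTPUT_DIR/mm_projector.bin is missing; pretrain stage likely failed." >&2
  exit 1
fi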
# Full fine-tune stage: target total batch size == 128.
# Re-derive gradient accumulation steps for the finetune batch size.
ACCU_STEPS=$(( 128 / (GPUS_PER_NODE * WORLD_SIZE * FTRBATCH) ))
torchrun $DISTRIBUTED_ARGS llava/train/train_mem.py \
--deepspeed ./scripts/zero2.json \
--model_name_or_path $LLM_CKPT_DIR \
--version v1 \
--feature_mode 'featup_muti_res' \
--data_path ./llava_new_replace_text-new.json \
--image_folder ./llava_new \
--vision_tower $CLIP_CKPT_DIR \
--pretrain_mm_mlp_adapter $OUTPUT_DIR/mm_projector.bin \
--mm_projector_type adapt_spatial_resampler \
--mm_vision_select_layer -2 \
--mm_use_im_start_end False \
--mm_use_im_patch_token False \
--image_aspect_ratio pad \
--group_by_modality_length True \
--bf16 True \
--output_dir $OUTPUT_DIR \
--num_train_epochs 1 \
--per_device_train_batch_size $FTRBATCH \
--per_device_eval_batch_size 4 \
--gradient_accumulation_steps $ACCU_STEPS \
--evaluation_strategy "no" \
--save_strategy "steps" \
--save_steps 100 \
--save_total_limit 600 \
--learning_rate ${FINETUNE_LR:-2e-5} \
--weight_decay 0. \
--warmup_ratio 0.03 \
--lr_scheduler_type "cosine" \
--logging_steps 1 \
--tf32 True \
--model_max_length 4096 \
--gradient_checkpointing True \
--dataloader_num_workers 4 \
--lazy_preprocess True \
--jbu_ckpt $JBU_CKPT \
--report_to wandb \
--sft_encoder True
# Evaluation (eval.sh requires the editdistance package).
pip install editdistance
sh eval.sh $CKPT