train_colbert.py
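
# Fine-tunes a ColBERT retriever on perturbation-augmented training data using the
# Tevatron training stack and denserr's perturbed dataset classes.
#
# Hypothetical invocations (the exact flags are defined by Tevatron's
# ModelArguments / DataArguments / TevatronTrainingArguments dataclasses):
#   python train_colbert.py config.json
#   python train_colbert.py --output_dir runs/colbert --grad_cache ...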

import logging
import os
import sys
from pathlib import Path

import ir_datasets
import torch
from transformers import AutoConfig, AutoTokenizer
from transformers import (
    HfArgumentParser,
    set_seed,
)

from tevatron.arguments import (
    ModelArguments,
    DataArguments,
    TevatronTrainingArguments as TrainingArguments,
)
from tevatron.data import TrainDataset, QPCollator
from tevatron.modeling import ColbertModel
from tevatron.trainer import TevatronTrainer as Trainer, GCTrainer
from tevatron.datasets import HFTrainDataset

from denserr.train.perturbed_train_dataset import PerturbedTrainDataset
from denserr.train.colbert_train_dataset import ColBERTPerturbedHFTrainDataset
from denserr.model.colbert_train import load_train_colbert

logger = logging.getLogger(__name__)
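

# Entry point: closely follows Tevatron's stock training driver, with the dataset
# swapped for the perturbation-aware ColBERT variants imported above.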
def main() -> None:
    parser = HfArgumentParser((ModelArguments, DataArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        model_args, data_args, training_args = parser.parse_json_file(
            json_file=os.path.abspath(sys.argv[1])
        )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    model_args: ModelArguments
    data_args: DataArguments
    training_args: TrainingArguments
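
    # Guard against silently clobbering a previous run's checkpoints.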
    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is "
            "not empty. Use --overwrite_output_dir to overwrite it."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    logger.info("Training/evaluation parameters %s", training_args)
    logger.info("MODEL parameters %s", model_args)
    set_seed(training_args.seed)
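
    # Model setup: load_train_colbert() is assumed to return the ColBERT model and
    # separate query/document tokenizers; its second return value is unused here.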
    num_labels = 1  # unused; appears to be leftover from the upstream Tevatron driver
    # model = AnceForTrain.from_pretrained(model_args.model_name_or_path)
    model, _, query_tokenizer, doc_tokenizer = load_train_colbert()
    train_dataset = ColBERTPerturbedHFTrainDataset(
        query_tokenizer=query_tokenizer,
        doc_tokenizer=doc_tokenizer,
        data_args=data_args,
        cache_dir=data_args.data_cache_dir or model_args.cache_dir,
    )
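
    # .tok presumably exposes the underlying HuggingFace tokenizer, which
    # QPCollator and save_pretrained() below expect.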
    tokenizer = doc_tokenizer.tok

    # In distributed runs, rank 0 maps/caches the perturbed dataset first; the
    # barriers make the other ranks wait and then reuse the cached result.
    if training_args.local_rank > 0:
        print("Waiting for main process to perform the mapping")
        torch.distributed.barrier()
    train_dataset = PerturbedTrainDataset(data_args, train_dataset.process(), tokenizer)
    if training_args.local_rank == 0:
        print("Loading results from main process")
        torch.distributed.barrier()
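
    # grad_cache selects Tevatron's gradient-caching trainer (GCTrainer), which
    # trades extra compute for memory to reach large effective batch sizes.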
    trainer_cls = GCTrainer if training_args.grad_cache else Trainer
    trainer = trainer_cls(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        data_collator=QPCollator(
            tokenizer, max_p_len=data_args.p_max_len, max_q_len=data_args.q_max_len
        ),
    )
    # Give the dataset a handle to the trainer; Tevatron's datasets read the epoch
    # from trainer state for seeded sampling, and this presumably does the same.
    train_dataset.trainer = trainer
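
    # Run training, then persist the weights (and, on the main process only, the
    # tokenizer) to training_args.output_dir.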
    trainer.train()
    trainer.save_model()
    if trainer.is_world_process_zero():
        tokenizer.save_pretrained(training_args.output_dir)


if __name__ == "__main__":
    main()