diff --git a/.gitignore b/.gitignore
index 22b9bb3..f4911f8 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,4 +1,6 @@
 build/
 dist/
 *.egg-info
-**/__pycache__
\ No newline at end of file
+**/__pycache__
+wandb/**
+output/**
\ No newline at end of file
diff --git a/README.md b/README.md
index cabfa02..19d1b80 100644
--- a/README.md
+++ b/README.md
@@ -14,15 +14,17 @@ LLM2Vec is a simple recipe to convert decoder-only LLMs into text encoders. It c
 ## Installation
 
-To use LLM2Vec, first install the llm2vec package from PyPI.
+To use LLM2Vec, first install the llm2vec package from PyPI, followed by installing flash-attention:
 
 ```bash
 pip install llm2vec
+pip install flash-attn --no-build-isolation
 ```
 
-You can also directly install it from our code by cloning the repository and:
+You can also directly install the latest version of llm2vec by cloning the repository:
 
 ```bash
 pip install -e .
+pip install flash-attn --no-build-isolation
 ```
 
 ## Getting Started
diff --git a/experiments/run_mntp.py b/experiments/run_mntp.py
new file mode 100644
index 0000000..1c4066a
--- /dev/null
+++ b/experiments/run_mntp.py
@@ -0,0 +1,981 @@
+#!/usr/bin/env python
+# coding=utf-8
+# Copyright 2020 The HuggingFace Team All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+The script is adapted from https://github.com/huggingface/transformers/blob/51bcadc10a569847b93a30dbe3a077037ae63bad/examples/pytorch/language-modeling/run_mlm.py
+"""
+
+import logging
+import math
+import os
+import sys
+import warnings
+from dataclasses import dataclass, field
+from itertools import chain
+from typing import Optional, Any, Tuple, List
+import numpy as np
+
+import datasets
+import evaluate
+from datasets import load_dataset
+
+import torch
+import transformers
+from transformers import (
+    CONFIG_MAPPING,
+    MODEL_FOR_MASKED_LM_MAPPING,
+    AutoConfig,
+    AutoTokenizer,
+    DataCollatorForLanguageModeling,
+    HfArgumentParser,
+    Trainer,
+    TrainingArguments,
+    TrainerCallback,
+    is_torch_tpu_available,
+    set_seed,
+)
+from transformers.trainer_utils import get_last_checkpoint
+from transformers.utils import send_example_telemetry
+from transformers.utils.versions import require_version
+
+from peft import LoraConfig, get_peft_model
+
+from llm2vec.models import MistralBiForMNTP, LlamaBiForMNTP
+
+# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
+# check_min_version("4.38.0.dev0")
+
+require_version(
+    "datasets>=1.8.0",
+    "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt",
+)
+
+logger = logging.getLogger(__name__)
+MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
+MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
+
+
+def get_model_class(config):
+    config_class_name = config.__class__.__name__
+    if config_class_name == "MistralConfig":
+        return MistralBiForMNTP
+    elif config_class_name == "LlamaConfig":
+        return LlamaBiForMNTP
+    else:
+        raise ValueError(f"Model class {config_class_name} not supported.")
+
+
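+# Illustrative usage of initialize_peft (defined below) -- a sketch only,
+# assuming the *BiForMNTP wrappers expose the usual from_pretrained API; the
+# model id is an example, not a recommendation:
+#
+#     model = MistralBiForMNTP.from_pretrained("mistralai/Mistral-7B-Instruct-v0.2")
+#     model = initialize_peft(model, lora_r=16, lora_alpha=32)
+#
+# The LoRA adapters land on the inner bidirectional decoder, not on the outer
+# MNTP wrapper (see the comment inside the function).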
+def initialize_peft(
+    model,
+    lora_r: int = 8,
+    lora_alpha: int = 16,
+    lora_dropout: float = 0.05,
+    lora_modules: Optional[List[str]] = None,
+):
+    if lora_modules is None and model.config.__class__.__name__ in [
+        "LlamaConfig",
+        "MistralConfig",
+    ]:
+        lora_modules = [
+            "q_proj",
+            "v_proj",
+            "k_proj",
+            "o_proj",
+            "gate_proj",
+            "up_proj",
+            "down_proj",
+        ]
+    elif lora_modules is None:
+        raise ValueError("lora_modules must be specified for this model.")
+
+    config = LoraConfig(
+        r=lora_r,
+        lora_alpha=lora_alpha,
+        target_modules=lora_modules,
+        lora_dropout=lora_dropout,
+        bias="none",
+        task_type=None,
+    )
+    # model organization is MODEL_TYPEBiForMNTP.model -> MODEL_TYPEBiModel, we have to apply PEFT to the inner model
+    peft_model = get_peft_model(model.get_model_for_peft(), config)
+    print("Model's LoRA trainable parameters:")
+    peft_model.print_trainable_parameters()
+    model.set_model_for_peft(peft_model)
+    return model
+
+
+@dataclass
+class ModelArguments:
+    """
+    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
+    """
+
+    model_name_or_path: Optional[str] = field(
+        default=None,
+        metadata={
+            "help": (
+                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
+            )
+        },
+    )
+    model_type: Optional[str] = field(
+        default=None,
+        metadata={
+            "help": "If training from scratch, pass a model type from the list: "
+            + ", ".join(MODEL_TYPES)
+        },
+    )
+    config_overrides: Optional[str] = field(
+        default=None,
+        metadata={
+            "help": (
+                "Override some existing default config settings when a model is trained from scratch. Example: "
+                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
+            )
+        },
+    )
+    config_name: Optional[str] = field(
+        default=None,
+        metadata={
+            "help": "Pretrained config name or path if not the same as model_name"
+        },
+    )
+    tokenizer_name: Optional[str] = field(
+        default=None,
+        metadata={
+            "help": "Pretrained tokenizer name or path if not the same as model_name"
+        },
+    )
+    cache_dir: Optional[str] = field(
+        default=None,
+        metadata={
+            "help": "Where do you want to store the pretrained models downloaded from huggingface.co"
+        },
+    )
+    use_fast_tokenizer: bool = field(
+        default=True,
+        metadata={
+            "help": "Whether to use one of the fast tokenizers (backed by the tokenizers library) or not."
+        },
+    )
+    model_revision: str = field(
+        default="main",
+        metadata={
+            "help": "The specific model version to use (can be a branch name, tag name or commit id)."
+        },
+    )
+    token: str = field(
+        default=None,
+        metadata={
+            "help": (
+                "The token to use as HTTP bearer authorization for remote files. If not specified, will use the token "
+                "generated when running `huggingface-cli login` (stored in `~/.huggingface`)."
+            )
+        },
+    )
+    use_auth_token: bool = field(
+        default=None,
+        metadata={
+            "help": "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token` instead."
+        },
+    )
+    trust_remote_code: bool = field(
+        default=False,
+        metadata={
+            "help": (
+                "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option "
+                "should only be set to `True` for repositories you trust and in which you have read the code, as it will "
+                "execute code present on the Hub on your local machine."
+            )
+        },
+    )
+    torch_dtype: Optional[str] = field(
+        default=None,
+        metadata={
+            "help": (
+                "Override the default `torch.dtype` and load the model under this dtype. If `auto` is passed, the "
+                "dtype will be automatically derived from the model's weights."
+            ),
+            "choices": ["auto", "bfloat16", "float16", "float32"],
+        },
+    )
+    attn_implementation: Optional[str] = field(
+        default="sdpa",
+        metadata={
+            "help": ("The attention implementation to use in the model."),
+            "choices": ["eager", "sdpa", "flash_attention_2"],
+        },
+    )
+    low_cpu_mem_usage: bool = field(
+        default=False,
+        metadata={
+            "help": (
+                "Create the model as an empty shell, then only materialize its parameters when the pretrained weights are loaded. "
+                "Setting this to True benefits LLM loading time and RAM consumption."
+            )
+        },
+    )
+
+    def __post_init__(self):
+        if self.config_overrides is not None and (
+            self.config_name is not None or self.model_name_or_path is not None
+        ):
+            raise ValueError(
+                "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
+            )
+
+
+@dataclass
+class DataTrainingArguments:
+    """
+    Arguments pertaining to what data we are going to input our model for training and eval.
+    """
+
+    dataset_name: Optional[str] = field(
+        default=None,
+        metadata={"help": "The name of the dataset to use (via the datasets library)."},
+    )
+    dataset_config_name: Optional[str] = field(
+        default=None,
+        metadata={
+            "help": "The configuration name of the dataset to use (via the datasets library)."
+        },
+    )
+    train_file: Optional[str] = field(
+        default=None, metadata={"help": "The input training data file (a text file)."}
+    )
+    validation_file: Optional[str] = field(
+        default=None,
+        metadata={
+            "help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."
+        },
+    )
+    overwrite_cache: bool = field(
+        default=True,
+        metadata={"help": "Overwrite the cached training and evaluation sets"},
+    )
+    validation_split_percentage: Optional[int] = field(
+        default=5,
+        metadata={
+            "help": "The percentage of the train set used as validation set in case there's no validation split"
+        },
+    )
+    max_seq_length: Optional[int] = field(
+        default=None,
+        metadata={
+            "help": (
+                "The maximum total input sequence length after tokenization. Sequences longer "
+                "than this will be truncated."
+            )
+        },
+    )
+    preprocessing_num_workers: Optional[int] = field(
+        default=None,
+        metadata={"help": "The number of processes to use for the preprocessing."},
+    )
+    mlm_probability: float = field(
+        default=0.15,
+        metadata={"help": "Ratio of tokens to mask for masked language modeling loss"},
+    )
+    line_by_line: bool = field(
+        default=False,
+        metadata={
+            "help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."
+        },
+    )
+    pad_to_max_length: bool = field(
+        default=False,
+        metadata={
+            "help": (
+                "Whether to pad all samples to `max_seq_length`. "
" + "If False, will pad the samples dynamically when batching to the maximum length in the batch." + ) + }, + ) + max_train_samples: Optional[int] = field( + default=None, + metadata={ + "help": ( + "For debugging purposes or quicker training, truncate the number of training examples to this " + "value if set." + ) + }, + ) + max_eval_samples: Optional[int] = field( + default=None, + metadata={ + "help": ( + "For debugging purposes or quicker training, truncate the number of evaluation examples to this " + "value if set." + ) + }, + ) + streaming: bool = field(default=False, metadata={"help": "Enable streaming mode"}) + + def __post_init__(self): + if self.streaming: + require_version( + "datasets>=2.0.0", "The streaming feature requires `datasets>=2.0.0`" + ) + + if ( + self.dataset_name is None + and self.train_file is None + and self.validation_file is None + ): + raise ValueError( + "Need either a dataset name or a training/validation file." + ) + else: + if self.train_file is not None: + extension = self.train_file.split(".")[-1] + if extension not in ["csv", "json", "txt"]: + raise ValueError( + "`train_file` should be a csv, a json or a txt file." + ) + if self.validation_file is not None: + extension = self.validation_file.split(".")[-1] + if extension not in ["csv", "json", "txt"]: + raise ValueError( + "`validation_file` should be a csv, a json or a txt file." + ) + + +# add more arguments +@dataclass +class CustomArguments: + """ + Custom arguments for the script + """ + + lora_dropout: float = field( + default=0.05, metadata={"help": "The dropout rate for lora"} + ) + + lora_r: int = field(default=8, metadata={"help": "The r value for lora"}) + + mask_token_type: str = field( + default="blank", + metadata={"help": "The type of mask token. Options: blank, eos, mask"}, + ) + + stop_after_n_steps: int = field( + default=10000, metadata={"help": "Stop training after n steps"} + ) + + data_collator_type: str = field( + default="default", + metadata={"help": "The type of data collator. Options: default, all_mask"}, + ) + + +class DataCollatorForLanguageModelingWithFullMasking(DataCollatorForLanguageModeling): + def torch_mask_tokens( + self, + inputs: Any, + special_tokens_mask: Optional[Any] = None, + ) -> Tuple[Any, Any]: + """ + Prepare masked tokens inputs/labels for masked language modeling: 100% MASK, 0% random, 0% original. 
+ """ + import torch + + labels = inputs.clone() + # We sample a few tokens in each sequence for MLM training (with probability `self.mlm_probability`) + probability_matrix = torch.full(labels.shape, self.mlm_probability) + if special_tokens_mask is None: + special_tokens_mask = [ + self.tokenizer.get_special_tokens_mask( + val, already_has_special_tokens=True + ) + for val in labels.tolist() + ] + special_tokens_mask = torch.tensor(special_tokens_mask, dtype=torch.bool) + else: + special_tokens_mask = special_tokens_mask.bool() + + probability_matrix.masked_fill_(special_tokens_mask, value=0.0) + masked_indices = torch.bernoulli(probability_matrix).bool() + labels[~masked_indices] = -100 # We only compute loss on masked tokens + + # 100% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK]) + inputs[masked_indices] = self.tokenizer.convert_tokens_to_ids( + self.tokenizer.mask_token + ) + + return inputs, labels + + +class StopTrainingCallback(TrainerCallback): + def __init__(self, stop_after_n_steps: int): + self.stop_after_n_steps = stop_after_n_steps + + def on_step_end(self, args, state, control, **kwargs): + if state.global_step >= self.stop_after_n_steps: + control.should_training_stop = True + + +class MNTPTrainer(Trainer): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.label_names = ["labels"] + + def _remove_unused_columns( + self, dataset: "datasets.Dataset", description: Optional[str] = None + ): + return dataset + + # We need a custom save function as we have to save the inner model + def _save(self, output_dir: Optional[str] = None, state_dict=None): + # If we are executing this function, we are the process zero, so we don't check for that. + output_dir = output_dir if output_dir is not None else self.args.output_dir + os.makedirs(output_dir, exist_ok=True) + logger.info(f"Saving model checkpoint to {output_dir}") + + # model organization is MODEL_TYPEBiForMNTP.model -> MODEL_TYPELBiModel, we have to save the inner model, handled by save_peft_model function of the outer model + self.model.save_peft_model(output_dir) + self.tokenizer.save_pretrained(output_dir) + + # Good practice: save your training arguments together with the trained model + torch.save(self.args, os.path.join(output_dir, "training_args.bin")) + + +def main(): + # See all possible arguments in src/transformers/training_args.py + # or by passing the --help flag to this script. + # We now keep distinct sets of args, for a cleaner separation of concerns. + + parser = HfArgumentParser( + (ModelArguments, DataTrainingArguments, TrainingArguments, CustomArguments) + ) + if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): + # If we pass only one argument to the script and it's the path to a json file, + # let's parse it to get our arguments. + model_args, data_args, training_args, custom_args = parser.parse_json_file( + json_file=os.path.abspath(sys.argv[1]) + ) + else: + ( + model_args, + data_args, + training_args, + custom_args, + ) = parser.parse_args_into_dataclasses() + + if training_args.gradient_checkpointing: + training_args.gradient_checkpointing_kwargs = {"use_reentrant": False} + + if model_args.use_auth_token is not None: + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token` instead.", + FutureWarning, + ) + if model_args.token is not None: + raise ValueError( + "`token` and `use_auth_token` are both specified. Please set only the argument `token`." 
+    if training_args.gradient_checkpointing:
+        training_args.gradient_checkpointing_kwargs = {"use_reentrant": False}
+
+    if model_args.use_auth_token is not None:
+        warnings.warn(
+            "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token` instead.",
+            FutureWarning,
+        )
+        if model_args.token is not None:
+            raise ValueError(
+                "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
+            )
+        model_args.token = model_args.use_auth_token
+
+    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
+    # information sent is the one passed as arguments along with your Python/PyTorch versions.
+    send_example_telemetry("run_mlm", model_args, data_args)
+
+    # Setup logging
+    logging.basicConfig(
+        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
+        datefmt="%m/%d/%Y %H:%M:%S",
+        handlers=[logging.StreamHandler(sys.stdout)],
+    )
+
+    if training_args.should_log:
+        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
+        transformers.utils.logging.set_verbosity_info()
+
+    log_level = training_args.get_process_log_level()
+    logger.setLevel(log_level)
+    datasets.utils.logging.set_verbosity(log_level)
+    transformers.utils.logging.set_verbosity(log_level)
+    transformers.utils.logging.enable_default_handler()
+    transformers.utils.logging.enable_explicit_format()
+
+    # Log on each process the small summary:
+    logger.warning(
+        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
+        + f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}"
+    )
+    # Set the verbosity to info of the Transformers logger (on main process only):
+    logger.info(f"Training/evaluation parameters {training_args}")
+
+    # Detecting last checkpoint.
+    last_checkpoint = None
+    if (
+        os.path.isdir(training_args.output_dir)
+        and training_args.do_train
+        and not training_args.overwrite_output_dir
+    ):
+        last_checkpoint = get_last_checkpoint(training_args.output_dir)
+        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
+            raise ValueError(
+                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
+                "Use --overwrite_output_dir to overcome."
+            )
+        elif (
+            last_checkpoint is not None and training_args.resume_from_checkpoint is None
+        ):
+            logger.info(
+                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
+                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
+            )
+
+    # Set seed before initializing model.
+    set_seed(training_args.seed)
+
+    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
+    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
+    # (the dataset will be downloaded automatically from the datasets Hub).
+    #
+    # For CSV/JSON files, this script will use the column called 'text' or the first column. You can easily tweak this
+    # behavior (see below).
+    #
+    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
+    # download the dataset.
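+    #
+    # When the chosen dataset ships no "validation" split, both branches below
+    # carve one out of "train" with the datasets library's slicing syntax,
+    # e.g. split="train[:5%]" for validation and split="train[5%:]" for
+    # training (5 being the default --validation_split_percentage).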
+    if data_args.dataset_name is not None:
+        # Downloading and loading a dataset from the hub.
+        raw_datasets = load_dataset(
+            data_args.dataset_name,
+            data_args.dataset_config_name,
+            cache_dir=model_args.cache_dir,
+            token=model_args.token,
+            streaming=data_args.streaming,
+        )
+        if "validation" not in raw_datasets.keys():
+            raw_datasets["validation"] = load_dataset(
+                data_args.dataset_name,
+                data_args.dataset_config_name,
+                split=f"train[:{data_args.validation_split_percentage}%]",
+                cache_dir=model_args.cache_dir,
+                token=model_args.token,
+                streaming=data_args.streaming,
+            )
+            raw_datasets["train"] = load_dataset(
+                data_args.dataset_name,
+                data_args.dataset_config_name,
+                split=f"train[{data_args.validation_split_percentage}%:]",
+                cache_dir=model_args.cache_dir,
+                token=model_args.token,
+                streaming=data_args.streaming,
+            )
+    else:
+        data_files = {}
+        if data_args.train_file is not None:
+            data_files["train"] = data_args.train_file
+            extension = data_args.train_file.split(".")[-1]
+        if data_args.validation_file is not None:
+            data_files["validation"] = data_args.validation_file
+            extension = data_args.validation_file.split(".")[-1]
+        if extension == "txt":
+            extension = "text"
+        raw_datasets = load_dataset(
+            extension,
+            data_files=data_files,
+            cache_dir=model_args.cache_dir,
+            token=model_args.token,
+        )
+
+        # If no validation data is there, validation_split_percentage will be used to divide the dataset.
+        if "validation" not in raw_datasets.keys():
+            raw_datasets["validation"] = load_dataset(
+                extension,
+                data_files=data_files,
+                split=f"train[:{data_args.validation_split_percentage}%]",
+                cache_dir=model_args.cache_dir,
+                token=model_args.token,
+            )
+            raw_datasets["train"] = load_dataset(
+                extension,
+                data_files=data_files,
+                split=f"train[{data_args.validation_split_percentage}%:]",
+                cache_dir=model_args.cache_dir,
+                token=model_args.token,
+            )
+
+    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
+    # https://huggingface.co/docs/datasets/loading_datasets.
+
+    # Load pretrained model and tokenizer
+    #
+    # Distributed training:
+    # The .from_pretrained methods guarantee that only one local process can concurrently
+    # download model & vocab.
+    config_kwargs = {
+        "cache_dir": model_args.cache_dir,
+        "revision": model_args.model_revision,
+        "token": model_args.token,
+        "trust_remote_code": model_args.trust_remote_code,
+    }
+    if model_args.config_name:
+        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
+    elif model_args.model_name_or_path:
+        config = AutoConfig.from_pretrained(
+            model_args.model_name_or_path, **config_kwargs
+        )
+    else:
+        config = CONFIG_MAPPING[model_args.model_type]()
+        logger.warning("You are instantiating a new config instance from scratch.")
+        if model_args.config_overrides is not None:
+            logger.info(f"Overriding config: {model_args.config_overrides}")
+            config.update_from_string(model_args.config_overrides)
+            logger.info(f"New config: {config}")
+
+    tokenizer_kwargs = {
+        "cache_dir": model_args.cache_dir,
+        "use_fast": model_args.use_fast_tokenizer,
+        "revision": model_args.model_revision,
+        "token": model_args.token,
+        "trust_remote_code": model_args.trust_remote_code,
+    }
+    if model_args.tokenizer_name:
+        tokenizer = AutoTokenizer.from_pretrained(
+            model_args.tokenizer_name, **tokenizer_kwargs
+        )
+    elif model_args.model_name_or_path:
+        tokenizer = AutoTokenizer.from_pretrained(
+            model_args.model_name_or_path, **tokenizer_kwargs
+        )
+    else:
+        raise ValueError(
+            "You are instantiating a new tokenizer from scratch. This is not supported by this script. "
+            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
+        )
+
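+    # Decoder-only tokenizers (Llama, Mistral) ship without a [MASK] token, so
+    # one has to be chosen before masked prediction is possible. --mask_token_type
+    # picks the strategy: reuse the underscore character ("blank"), reuse the
+    # EOS token ("eos"), or register a brand-new token ("mask").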
+    # blank, eos, mask
+    if tokenizer.mask_token is None:
+        if custom_args.mask_token_type == "blank":
+            tokenizer.mask_token = "_"
+        elif custom_args.mask_token_type == "eos":
+            tokenizer.mask_token = tokenizer.eos_token
+        elif custom_args.mask_token_type == "mask":
+            tokenizer.add_tokens(["