Skip to content

Commit

Permalink
fixes 8
Browse files Browse the repository at this point in the history
  • Loading branch information
Almaz Dautov committed Nov 22, 2024
1 parent 3d76e92 commit 0ec44f9
Show file tree
Hide file tree
Showing 7 changed files with 236 additions and 200 deletions.
372 changes: 186 additions & 186 deletions tests/fixtures/configs/train/reinforce/gemma.json
Original file line number Diff line number Diff line change
@@ -1,188 +1,188 @@
{
"train_dataset_settings": {
"sources": [
{
"name": "train_chat",
"records_path": "tests/fixtures/datasets/chat/train_chat.jsonl",
"sample_rate": 1.0
}
],
"prompt_template": {
"role_tag_mapping": {
"bot": "assistant",
"user": "user",
"system": "system"
},
"prefix_template": "<|start_header_id|>{role}<|end_header_id|>\n\n",
"suffix_template": "<|eot_id|>"
},
"dataset_type": "chat",
"max_tokens_count": 2000,
"only_answer_loss": true
},
"val_dataset_settings": {
"sources": [
{
"name": "val_chat",
"records_path": "tests/fixtures/datasets/chat/train_chat.jsonl",
"sample_rate": 1.0
}
],
"prompt_template": {
"role_tag_mapping": {
"bot": "assistant",
"user": "user",
"system": "system"
},
"prefix_template": "<|start_header_id|>{role}<|end_header_id|>\n\n",
"suffix_template": "<|eot_id|>"
},
"dataset_type": "chat",
"max_tokens_count": 2000,
"only_answer_loss": true
},
"cherry_pick_settings": {
"generator_transformers_settings": {
"num_beams": 1,
"do_sample": false,
"stop_strings": "</RS>",
"max_new_tokens": 8
},
"custom_generation_settings": {
"skip_special_tokens": false
},
"dataset_settings": {
"sources": [
{
"name": "chat_test",
"records_path": "tests/fixtures/datasets/chat/train_chat.jsonl",
"num_samples": 2
}
],
"prompt_template": {
"role_tag_mapping": {
"bot": "assistant",
"user": "user",
"system": "system"
},
"prefix_template": "<|start_header_id|>{role}<|end_header_id|>\n\n",
"suffix_template": "<|eot_id|>"
},
"dataset_type": "chat",
"max_tokens_count": 150,
"only_answer_loss": true
},
"metric_settings": [
{
"type": "length",
"parameters": {
"need_average": [
true
]
}
},
{
"type": "kl",
"parameters": {
"need_average": [
true
],
"ref_logits_type": "sft"
}
},
{
"type": "kl",
"parameters": {
"need_average": [
true
],
"ref_logits_type": "reference"
}
}
]
},
"reward_model_settings": {
"model_path": "/from_s3/llama-3.2-1B",
"model_type": "seq_cls",
"model_kwargs": {
"num_labels": 1,
"attn_implementation": "flash_attention_2"
},
"transformers_settings": {}
},
"model_settings": {
"model_path": "/from_s3/llama-3.2-1B",
"model_type": "causal",
"model_kwargs": {
"attn_implementation": "flash_attention_2"
},
"transformers_settings": {},
"liger_kernels_settings": {}
},
"tokenizer_settings": {
"tokenizer_kwargs": {
"padding_side": "left"
}
},
"trainer_settings": {
"actor_settings": {
"actor_type": "distributed_vllm",
"vllm_num_engines": 2,
"vllm_tensor_parallel_size": 1
},
"deepspeed": "tests/fixtures/configs/train/reinforce/deepspeed_cfg.json",
"gradient_checkpointing": true,
"gradient_checkpointing_kwargs": {
"use_reentrant": false
},
"critic_type": "ray_transformers",
"reward_processor_type": "rloo",
"evaluation_strategy": "steps",
"num_generations": 4,
"per_device_train_batch_size": 1,
"per_device_eval_batch_size": 1,
"gradient_accumulation_steps": 1,
"adam_beta1": 0.9,
"adam_beta2": 0.95,
"adam_epsilon": 0.00001,
"eval_steps": 10,
"save_steps": 100,
"save_strategy": "steps",
"load_best_model_at_end": false,
"logging_steps": 1,
"learning_rate": 0.00001,
"max_steps": 1001,
"lr_scheduler_type": "constant_with_warmup",
"warmup_ratio": 0.03,
"fp16": false,
"bf16": true,
"optim": "adamw_torch",
"weight_decay": 0.0,
"max_grad_norm": 2,
"save_total_limit": 11,
"dataloader_num_workers": 12,
"stop_token": "<end_of_turn>",
"temperature": 0.7,
"non_eos_penalty": true,
"penalty_reward_value": -1,
"clip_rewards_min": -1e+8,
"clip_rewards_max": 1e+8,
"whiten_rewards": false,
"kl_coef": 0.05,
"mean_baseline_coef": 0.95,
"num_samples_for_reward_stats": 1000,
"no_cuda": false
},
"special_tokens_settings": {
"bos_token": "<s>",
"eos_token": "</s>",
"pad_token": "<unk>"
},
"logging_settings": {
"project_name": "alignment",
"run_name": "sft",
"entity": "turbo-alignment"
},
"seed": 0,
"log_path": "train_output"
"train_dataset_settings": {
"sources": [
{
"name": "train_chat",
"records_path": "tests/fixtures/datasets/chat/train_chat.jsonl",
"sample_rate": 1.0
}
],
"prompt_template": {
"role_tag_mapping": {
"bot": "assistant",
"user": "user",
"system": "system"
},
"prefix_template": "<|start_header_id|>{role}<|end_header_id|>\n\n",
"suffix_template": "<|eot_id|>"
},
"dataset_type": "chat",
"max_tokens_count": 2000,
"only_answer_loss": true
},
"val_dataset_settings": {
"sources": [
{
"name": "val_chat",
"records_path": "tests/fixtures/datasets/chat/train_chat.jsonl",
"sample_rate": 1.0
}
],
"prompt_template": {
"role_tag_mapping": {
"bot": "assistant",
"user": "user",
"system": "system"
},
"prefix_template": "<|start_header_id|>{role}<|end_header_id|>\n\n",
"suffix_template": "<|eot_id|>"
},
"dataset_type": "chat",
"max_tokens_count": 2000,
"only_answer_loss": true
},
"cherry_pick_settings": {
"generator_transformers_settings": {
"num_beams": 1,
"do_sample": false,
"stop_strings": "</RS>",
"max_new_tokens": 8
},
"custom_generation_settings": {
"skip_special_tokens": false
},
"dataset_settings": {
"sources": [
{
"name": "chat_test",
"records_path": "tests/fixtures/datasets/chat/train_chat.jsonl",
"num_samples": 2
}
],
"prompt_template": {
"role_tag_mapping": {
"bot": "assistant",
"user": "user",
"system": "system"
},
"prefix_template": "<|start_header_id|>{role}<|end_header_id|>\n\n",
"suffix_template": "<|eot_id|>"
},
"dataset_type": "chat",
"max_tokens_count": 150,
"only_answer_loss": true
},
"metric_settings": [
{
"type": "length",
"parameters": {
"need_average": [
true
]
}
},
{
"type": "kl",
"parameters": {
"need_average": [
true
],
"ref_logits_type": "sft"
}
},
{
"type": "kl",
"parameters": {
"need_average": [
true
],
"ref_logits_type": "reference"
}
}
]
},
"reward_model_settings": {
"model_path": "/from_s3/llama-3.2-1B",
"model_type": "seq_cls",
"model_kwargs": {
"num_labels": 1,
"attn_implementation": "flash_attention_2"
},
"transformers_settings": {}
},
"model_settings": {
"model_path": "/from_s3/llama-3.2-1B",
"model_type": "causal",
"model_kwargs": {
"attn_implementation": "flash_attention_2"
},
"transformers_settings": {},
"liger_kernels_settings": {}
},
"tokenizer_settings": {
"tokenizer_kwargs": {
"padding_side": "left"
}
},
"trainer_settings": {
"actor_settings": {
"actor_type": "distributed_vllm",
"vllm_num_engines": 2,
"vllm_tensor_parallel_size": 1
},
"deepspeed": "tests/fixtures/configs/train/reinforce/deepspeed_cfg.json",
"gradient_checkpointing": true,
"gradient_checkpointing_kwargs": {
"use_reentrant": false
},
"critic_type": "ray_transformers",
"reward_processor_type": "rloo",
"evaluation_strategy": "steps",
"num_generations": 4,
"per_device_train_batch_size": 1,
"per_device_eval_batch_size": 1,
"gradient_accumulation_steps": 1,
"adam_beta1": 0.9,
"adam_beta2": 0.95,
"adam_epsilon": 0.00001,
"eval_steps": 10,
"save_steps": 100,
"save_strategy": "steps",
"load_best_model_at_end": false,
"logging_steps": 1,
"learning_rate": 0.00001,
"max_steps": 1001,
"lr_scheduler_type": "constant_with_warmup",
"warmup_ratio": 0.03,
"fp16": false,
"bf16": true,
"optim": "adamw_torch",
"weight_decay": 0.0,
"max_grad_norm": 2,
"save_total_limit": 11,
"dataloader_num_workers": 12,
"stop_token": "<end_of_turn>",
"temperature": 0.7,
"non_eos_penalty": true,
"penalty_reward_value": -1,
"clip_rewards_min": -1e+8,
"clip_rewards_max": 1e+8,
"whiten_rewards": false,
"kl_coef": 0.05,
"mean_baseline_coef": 0.95,
"num_samples_for_reward_stats": 1000,
"no_cuda": false
},
"special_tokens_settings": {
"bos_token": "<s>",
"eos_token": "</s>",
"pad_token": "<unk>"
},
"logging_settings": {
"project_name": "alignment",
"run_name": "sft",
"entity": "turbo-alignment"
},
"seed": 0,
"log_path": "train_output"
}
2 changes: 1 addition & 1 deletion turbo_alignment/cli/train.py
Original file line number Diff line number Diff line change
Expand Up @@ -130,7 +130,7 @@ def reinforce_training(

experiment_settings = pipeline_settings.REINFORCETrainExperimentSettings.parse_file(experiment_settings_path)

policy_models = RayGroup(num_nodes=1, num_gpus_per_node=8, ray_actor_type=pipelines.TrainREINFORCEStrategy)
policy_models = RayGroup(num_nodes=2, num_gpus_per_node=8, ray_actor_type=pipelines.TrainREINFORCEStrategy)#64.19 GiB is allocated by PyTorch, and 3.40 GiB
reward_model = RayGroup(num_nodes=1, num_gpus_per_node=1, ray_actor_type=RewardModel)
reference_model = RayGroup(num_nodes=1, num_gpus_per_node=1, ray_actor_type=ReferenceModel)

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,7 @@ def __init__(self, world_size, rank, local_rank, master_addr, master_port):
@staticmethod
def _get_current_node_ip():
    """Return the IP address of the node this actor is running on.

    Uses Ray's internal node-IP helper and strips the square brackets
    that Ray wraps around IPv6 addresses (e.g. "[::1]" -> "::1") so the
    result is usable directly in host:port strings.

    NOTE(review): relies on the private ``ray._private`` API, which may
    break across Ray versions — consider ``ray.util.get_node_ip_address``
    if available in the pinned Ray release.
    """
    address = ray._private.services.get_node_ip_address()
    # Strip IPv6 brackets; harmless no-op for IPv4 addresses.
    return address.strip("[]")

Expand Down
Loading

0 comments on commit 0ec44f9

Please sign in to comment.