Skip to content

Commit

Permalink
add experiment to run retweet experiment
Browse files Browse the repository at this point in the history
  • Loading branch information
iLampard committed Nov 14, 2024
1 parent 64ea3fe commit e274406
Show file tree
Hide file tree
Showing 3 changed files with 281 additions and 55 deletions.
55 changes: 0 additions & 55 deletions examples/benchmark_script.py

This file was deleted.

255 changes: 255 additions & 0 deletions examples/train_experiment/retweet_config.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,255 @@
pipeline_config_id: runner_config

data:
retweet:
data_format: json
train_dir: easytpp/retweet
valid_dir: easytpp/retweet
test_dir: easytpp/retweet
data_specs:
num_event_types: 3
pad_token_id: 3
padding_side: right
truncation_side: right

NHP_train:
base_config:
stage: train
backend: torch
dataset_id: retweet
runner_id: std_tpp
model_id: NHP # model name
base_dir: './checkpoints/'
trainer_config:
batch_size: 256
max_epoch: 20
shuffle: False
optimizer: adam
learning_rate: 1.e-3
valid_freq: 1
use_tfb: False
metrics: [ 'acc', 'rmse' ]
seed: 2019
gpu: -1
model_config:
hidden_size: 64
loss_integral_num_sample_per_step: 20
thinning:
num_seq: 10
num_sample: 1
num_exp: 500 # number of i.i.d. Exp(intensity_bound) draws at one time in thinning algorithm
look_ahead_time: 10
patience_counter: 5 # the maximum iteration used in adaptive thinning
over_sample_rate: 5
num_samples_boundary: 5
dtime_max: 5
num_step_gen: 1



SAHP_train:
base_config:
stage: train
backend: torch
dataset_id: retweet
runner_id: std_tpp
model_id: SAHP # model name
base_dir: './checkpoints/'
trainer_config:
batch_size: 256
max_epoch: 20
shuffle: False
optimizer: adam
learning_rate: 1.e-3
valid_freq: 1
use_tfb: False
metrics: [ 'acc', 'rmse' ]
seed: 2019
gpu: 0
model_config:
hidden_size: 32
time_emb_size: 16
num_layers: 2
num_heads: 2
loss_integral_num_sample_per_step: 20
use_ln: False
thinning:
num_seq: 10
num_sample: 1
num_exp: 500 # number of i.i.d. Exp(intensity_bound) draws at one time in thinning algorithm
look_ahead_time: 10
patience_counter: 5 # the maximum iteration used in adaptive thinning
over_sample_rate: 5
num_samples_boundary: 5
dtime_max: 5
num_step_gen: 1



SAHP_gen:
base_config:
stage: gen
backend: torch
dataset_id: retweet
runner_id: std_tpp
model_id: SAHP # model name
base_dir: './checkpoints/'
trainer_config:
batch_size: 256
max_epoch: 1
model_config:
hidden_size: 16
time_emb_size: 4
num_layers: 2
num_heads: 2
loss_integral_num_sample_per_step: 20
use_ln: False
thinning:
num_seq: 10
num_sample: 1
num_exp: 500 # number of i.i.d. Exp(intensity_bound) draws at one time in thinning algorithm
look_ahead_time: 10
patience_counter: 5 # the maximum iteration used in adaptive thinning
over_sample_rate: 5
num_samples_boundary: 5
dtime_max: 5
num_step_gen: 10

THP_train:
base_config:
stage: train
backend: torch
dataset_id: retweet
runner_id: std_tpp
model_id: THP # model name
base_dir: './checkpoints/'
trainer_config:
batch_size: 256
max_epoch: 30
shuffle: False
optimizer: adam
learning_rate: 1.e-3
valid_freq: 1
use_tfb: False
metrics: [ 'acc', 'rmse' ]
seed: 2019
gpu: -1
model_config:
hidden_size: 32
time_emb_size: 16
num_layers: 2
num_heads: 2
mc_num_sample_per_step: 20
loss_integral_num_sample_per_step: 20
use_ln: False
thinning:
num_seq: 10
num_sample: 1
num_exp: 500 # number of i.i.d. Exp(intensity_bound) draws at one time in thinning algorithm
look_ahead_time: 10
patience_counter: 5 # the maximum iteration used in adaptive thinning
over_sample_rate: 5
num_samples_boundary: 5
dtime_max: 5
num_step_gen: 1


THP_gen:
base_config:
stage: gen
backend: torch
dataset_id: retweet
runner_id: std_tpp
model_id: THP # model name
base_dir: './checkpoints/'
trainer_config:
batch_size: 256
max_epoch: 1
model_config:
hidden_size: 32
time_emb_size: 16
num_layers: 2
num_heads: 2
mc_num_sample_per_step: 20
loss_integral_num_sample_per_step: 20
use_ln: False
# pretrained_model_dir: ./checkpoints/2694_4384867712_230603-160544/models/saved_model
thinning:
num_seq: 10
num_sample: 1
num_exp: 500 # number of i.i.d. Exp(intensity_bound) draws at one time in thinning algorithm
look_ahead_time: 10
patience_counter: 5 # the maximum iteration used in adaptive thinning
over_sample_rate: 5
num_samples_boundary: 5
dtime_max: 5
num_step_gen: 10

AttNHP_train:
base_config:
stage: train
backend: torch
dataset_id: retweet
runner_id: std_tpp
model_id: AttNHP # model name
base_dir: './checkpoints/'
trainer_config:
batch_size: 256
max_epoch: 200
shuffle: False
optimizer: adam
learning_rate: 1.e-3
valid_freq: 1
use_tfb: False
metrics: [ 'acc', 'rmse' ]
seed: 2019
gpu: -1
model_config:
hidden_size: 16
time_emb_size: 4
num_layers: 2
num_heads: 2
loss_integral_num_sample_per_step: 10
use_ln: False
thinning:
num_seq: 2
num_sample: 1
num_exp: 50 # number of i.i.d. Exp(intensity_bound) draws at one time in thinning algorithm
look_ahead_time: 10
patience_counter: 5 # the maximum iteration used in adaptive thinning
over_sample_rate: 5
num_samples_boundary: 5
dtime_max: 5
num_step_gen: 1


AttNHP_gen:
base_config:
stage: gen
backend: torch
dataset_id: retweet
runner_id: std_tpp
model_id: AttNHP # model name
base_dir: './checkpoints/'
trainer_config:
batch_size: 256
max_epoch: 1
model_config:
hidden_size: 16
time_emb_size: 4
num_layers: 2
num_heads: 2
mc_num_sample_per_step: 20
loss_integral_num_sample_per_step: 20
use_ln: False
# pretrained_model_dir: ./checkpoints/6934_4375315840_230603-222826/models/saved_model
thinning:
num_seq: 10
num_sample: 1
num_exp: 50 # number of i.i.d. Exp(intensity_bound) draws at one time in thinning algorithm
look_ahead_time: 10
patience_counter: 5 # the maximum iteration used in adaptive thinning
over_sample_rate: 5
num_samples_boundary: 5
dtime_max: 5
num_step_gen: 10
26 changes: 26 additions & 0 deletions examples/train_experiment/run_retweet.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
import argparse

from easy_tpp.config_factory import Config
from easy_tpp.runner import Runner


def main():
    """Parse CLI options, load the experiment config, and run the TPP model.

    Reads ``--config_dir`` (path to the runner yaml) and ``--experiment_id``
    (which experiment section of the yaml to use), then builds and executes
    the corresponding runner.
    """
    arg_parser = argparse.ArgumentParser()

    arg_parser.add_argument('--config_dir', type=str, required=False, default='retweet_config.yaml',
                            help='Dir of configuration yaml to train and evaluate the model.')

    arg_parser.add_argument('--experiment_id', type=str, required=False, default='NHP_train',
                            help='Experiment id in the config file.')

    cli_args = arg_parser.parse_args()

    # Resolve the selected experiment section from the yaml file.
    runner_config = Config.build_from_yaml_file(cli_args.config_dir, experiment_id=cli_args.experiment_id)

    # Build the runner from the resolved config and launch the experiment.
    experiment_runner = Runner.build_from_config(runner_config)
    experiment_runner.run()


if __name__ == '__main__':
    main()

0 comments on commit e274406

Please sign in to comment.