2022-12-16 23:26:41,287 P83727 INFO Params: {
"batch_size": "4096",
"data_format": "csv",
"data_root": "../data/Avazu/",
"dataset_id": "avazu_x1_0bbde04e",
"debug_mode": "False",
"early_stop_patience": "2",
"embedding_dim": "10",
"embedding_regularizer": "0.05",
"epochs": "100",
"eval_interval": "1",
"feature_cols": "[{'active': True, 'dtype': 'float', 'name': ['feat_1', 'feat_2', 'feat_3', 'feat_4', 'feat_5', 'feat_6', 'feat_7', 'feat_8', 'feat_9', 'feat_10', 'feat_11', 'feat_12', 'feat_13', 'feat_14', 'feat_15', 'feat_16', 'feat_17', 'feat_18', 'feat_19', 'feat_20', 'feat_21', 'feat_22'], 'type': 'categorical'}]",
"feature_specs": "None",
"gpu": "3",
"group_id": "None",
"label_col": "{'dtype': 'float', 'name': 'label'}",
"learning_rate": "0.001",
"loss": "binary_crossentropy",
"metrics": "['AUC', 'logloss']",
"min_categr_count": "1",
"mlp1_batch_norm": "True",
"mlp1_dropout": "0.3",
"mlp1_hidden_activations": "relu",
"mlp1_hidden_units": "[400, 400, 400]",
"mlp2_batch_norm": "True",
"mlp2_dropout": "0.3",
"mlp2_hidden_activations": "relu",
"mlp2_hidden_units": "[400]",
"model": "DualMLP",
"model_id": "DualMLP_avazu_x1_010_1864e8b3",
"model_root": "./checkpoints/DualMLP_avazu_x1/",
"monitor": "AUC",
"monitor_mode": "max",
"net_regularizer": "0",
"num_workers": "3",
"optimizer": "adam",
"ordered_features": "None",
"pickle_feature_encoder": "True",
"save_best_only": "True",
"seed": "2021",
"shuffle": "True",
"task": "binary_classification",
"test_data": "../data/Avazu/Avazu_x1/test.csv",
"train_data": "../data/Avazu/Avazu_x1/train.csv",
"valid_data": "../data/Avazu/Avazu_x1/valid.csv",
"verbose": "0"
}
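Note: the two-stream settings above (mlp1_hidden_units=[400, 400, 400], mlp2_hidden_units=[400], embedding_dim=10 over 22 categorical fields) correspond to the DualMLP baseline. A minimal PyTorch sketch of that architecture is given below, assuming the FinalMLP-style layout in which both MLPs read the same concatenated field embeddings and their scalar logits are summed; class and function names are illustrative, not FuxiCTR's actual implementation.

import torch
import torch.nn as nn

def make_mlp(input_dim, hidden_units, dropout, batch_norm):
    """Plain MLP tower: Linear -> BatchNorm -> ReLU -> Dropout per hidden layer,
    followed by a single-logit output layer (assumed, see note above)."""
    layers, in_dim = [], input_dim
    for units in hidden_units:
        layers.append(nn.Linear(in_dim, units))
        if batch_norm:
            layers.append(nn.BatchNorm1d(units))
        layers.append(nn.ReLU())
        layers.append(nn.Dropout(dropout))
        in_dim = units
    layers.append(nn.Linear(in_dim, 1))
    return nn.Sequential(*layers)

class DualMLPSketch(nn.Module):
    """Hypothetical two-stream MLP matching the logged hyperparameters:
    22 categorical fields, embedding_dim=10, mlp1=[400, 400, 400], mlp2=[400]."""
    def __init__(self, vocab_sizes, embedding_dim=10):
        super().__init__()
        self.embeddings = nn.ModuleList(
            [nn.Embedding(v, embedding_dim) for v in vocab_sizes]
        )
        input_dim = len(vocab_sizes) * embedding_dim  # 22 * 10 = 220
        self.mlp1 = make_mlp(input_dim, [400, 400, 400], dropout=0.3, batch_norm=True)
        self.mlp2 = make_mlp(input_dim, [400], dropout=0.3, batch_norm=True)

    def forward(self, x):
        # x: LongTensor of shape (batch, 22) holding the categorical field indices
        emb = torch.cat([e(x[:, i]) for i, e in enumerate(self.embeddings)], dim=1)
        logit = self.mlp1(emb) + self.mlp2(emb)  # assumed fusion: sum of stream logits
        return torch.sigmoid(logit)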
2022-12-16 23:26:41,288 P83727 INFO Load feature_map from json: ../data/Avazu/avazu_x1_0bbde04e/feature_map.json
2022-12-16 23:26:41,289 P83727 INFO Set column index...
2022-12-16 23:26:41,289 P83727 INFO Feature specs: {
"feat_1": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'oov_idx': 8, 'vocab_size': 9}",
"feat_10": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'oov_idx': 1048284, 'vocab_size': 1048285}",
"feat_11": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'oov_idx': 6514, 'vocab_size': 6515}",
"feat_12": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'oov_idx': 5, 'vocab_size': 6}",
"feat_13": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'oov_idx': 5, 'vocab_size': 6}",
"feat_14": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'oov_idx': 1939, 'vocab_size': 1940}",
"feat_15": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'oov_idx': 9, 'vocab_size': 10}",
"feat_16": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'oov_idx': 10, 'vocab_size': 11}",
"feat_17": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'oov_idx': 348, 'vocab_size': 349}",
"feat_18": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'oov_idx': 5, 'vocab_size': 6}",
"feat_19": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'oov_idx': 60, 'vocab_size': 61}",
"feat_2": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'oov_idx': 8, 'vocab_size': 9}",
"feat_20": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'oov_idx': 170, 'vocab_size': 171}",
"feat_21": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'oov_idx': 51, 'vocab_size': 52}",
"feat_22": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'oov_idx': 25, 'vocab_size': 26}",
"feat_3": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'oov_idx': 3479, 'vocab_size': 3480}",
"feat_4": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'oov_idx': 4270, 'vocab_size': 4271}",
"feat_5": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'oov_idx': 25, 'vocab_size': 26}",
"feat_6": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'oov_idx': 4863, 'vocab_size': 4864}",
"feat_7": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'oov_idx': 304, 'vocab_size': 305}",
"feat_8": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'oov_idx': 32, 'vocab_size': 33}",
"feat_9": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'oov_idx': 228185, 'vocab_size': 228186}"
}
2022-12-16 23:26:44,737 P83727 INFO Total number of parameters: 13487812.
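The reported total of 13,487,812 parameters can be reconstructed from the feature specs and MLP settings above. The sketch below assumes one embedding table per field, a BatchNorm1d layer (weight plus bias) after every hidden Linear layer, and a separate single-logit output layer per stream with no additional fusion parameters; under those assumptions the count matches exactly.

# Embedding parameters: sum of the 22 vocab sizes times embedding_dim=10.
vocab_sizes = [9, 9, 3480, 4271, 26, 4864, 305, 33, 228186, 1048285, 6515,
               6, 6, 1940, 10, 11, 349, 6, 61, 171, 52, 26]
embedding_params = sum(vocab_sizes) * 10          # 1,298,621 * 10 = 12,986,210

def mlp_params(input_dim, hidden_units):
    """Linear + BatchNorm1d per hidden layer, then a single-logit output layer."""
    total, in_dim = 0, input_dim
    for units in hidden_units:
        total += in_dim * units + units           # Linear weight + bias
        total += 2 * units                        # BatchNorm1d weight + bias
        in_dim = units
    return total + in_dim * 1 + 1                 # output layer weight + bias

input_dim = 22 * 10                               # 220-dim concatenated embeddings
mlp1 = mlp_params(input_dim, [400, 400, 400])     # 412,001
mlp2 = mlp_params(input_dim, [400])               # 89,601
print(embedding_params + mlp1 + mlp2)             # 13,487,812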
2022-12-16 23:26:44,737 P83727 INFO Loading data...
2022-12-16 23:26:44,737 P83727 INFO Loading data from h5: ../data/Avazu/avazu_x1_0bbde04e/train.h5
2022-12-16 23:26:56,565 P83727 INFO Train samples: total/28300276, blocks/1
2022-12-16 23:26:56,565 P83727 INFO Loading data from h5: ../data/Avazu/avazu_x1_0bbde04e/valid.h5
2022-12-16 23:26:58,276 P83727 INFO Validation samples: total/4042897, blocks/1
2022-12-16 23:26:58,276 P83727 INFO Loading train and validation data done.
2022-12-16 23:26:58,276 P83727 INFO Start training: 6910 batches/epoch
2022-12-16 23:26:58,276 P83727 INFO ************ Epoch=1 start ************
2022-12-16 23:30:29,580 P83727 INFO [Metrics] AUC: 0.733865
2022-12-16 23:30:29,583 P83727 INFO Save best model: monitor(max): 0.733865
2022-12-16 23:30:29,679 P83727 INFO --- 6910/6910 batches finished ---
2022-12-16 23:30:29,746 P83727 INFO Train loss @epoch 1: 0.445857
2022-12-16 23:30:29,746 P83727 INFO ************ Epoch=1 end ************
2022-12-16 23:34:01,372 P83727 INFO [Metrics] AUC: 0.736661
2022-12-16 23:34:01,374 P83727 INFO Save best model: monitor(max): 0.736661
2022-12-16 23:34:01,484 P83727 INFO --- 6910/6910 batches finished ---
2022-12-16 23:34:01,526 P83727 INFO Train loss @epoch 2: 0.438708
2022-12-16 23:34:01,527 P83727 INFO ************ Epoch=2 end ************
2022-12-16 23:37:32,715 P83727 INFO [Metrics] AUC: 0.738517
2022-12-16 23:37:32,718 P83727 INFO Save best model: monitor(max): 0.738517
2022-12-16 23:37:32,824 P83727 INFO --- 6910/6910 batches finished ---
2022-12-16 23:37:32,869 P83727 INFO Train loss @epoch 3: 0.438277
2022-12-16 23:37:32,869 P83727 INFO ************ Epoch=3 end ************
2022-12-16 23:41:03,903 P83727 INFO [Metrics] AUC: 0.734888
2022-12-16 23:41:03,905 P83727 INFO Monitor(max) STOP: 0.734888 !
2022-12-16 23:41:03,906 P83727 INFO Reduce learning rate on plateau: 0.000100
2022-12-16 23:41:03,906 P83727 INFO --- 6910/6910 batches finished ---
2022-12-16 23:41:03,952 P83727 INFO Train loss @epoch 4: 0.438617
2022-12-16 23:41:03,952 P83727 INFO ************ Epoch=4 end ************
2022-12-16 23:44:36,476 P83727 INFO [Metrics] AUC: 0.746693
2022-12-16 23:44:36,478 P83727 INFO Save best model: monitor(max): 0.746693
2022-12-16 23:44:36,581 P83727 INFO --- 6910/6910 batches finished ---
2022-12-16 23:44:36,624 P83727 INFO Train loss @epoch 5: 0.409730
2022-12-16 23:44:36,624 P83727 INFO ************ Epoch=5 end ************
2022-12-16 23:48:08,541 P83727 INFO [Metrics] AUC: 0.746070
2022-12-16 23:48:08,543 P83727 INFO Monitor(max) STOP: 0.746070 !
2022-12-16 23:48:08,543 P83727 INFO Reduce learning rate on plateau: 0.000010
2022-12-16 23:48:08,544 P83727 INFO --- 6910/6910 batches finished ---
2022-12-16 23:48:08,589 P83727 INFO Train loss @epoch 6: 0.411585
2022-12-16 23:48:08,589 P83727 INFO ************ Epoch=6 end ************
2022-12-16 23:51:48,305 P83727 INFO [Metrics] AUC: 0.747098
2022-12-16 23:51:48,307 P83727 INFO Save best model: monitor(max): 0.747098
2022-12-16 23:51:48,408 P83727 INFO --- 6910/6910 batches finished ---
2022-12-16 23:51:48,459 P83727 INFO Train loss @epoch 7: 0.397747
2022-12-16 23:51:48,460 P83727 INFO ************ Epoch=7 end ************
2022-12-16 23:55:28,034 P83727 INFO [Metrics] AUC: 0.744478
2022-12-16 23:55:28,038 P83727 INFO Monitor(max) STOP: 0.744478 !
2022-12-16 23:55:28,038 P83727 INFO Reduce learning rate on plateau: 0.000001
2022-12-16 23:55:28,039 P83727 INFO --- 6910/6910 batches finished ---
2022-12-16 23:55:28,104 P83727 INFO Train loss @epoch 8: 0.395220
2022-12-16 23:55:28,104 P83727 INFO ************ Epoch=8 end ************
2022-12-16 23:59:05,518 P83727 INFO [Metrics] AUC: 0.742902
2022-12-16 23:59:05,520 P83727 INFO Monitor(max) STOP: 0.742902 !
2022-12-16 23:59:05,520 P83727 INFO Reduce learning rate on plateau: 0.000001
2022-12-16 23:59:05,521 P83727 INFO ********* Epoch==9 early stop *********
2022-12-16 23:59:05,521 P83727 INFO --- 6910/6910 batches finished ---
2022-12-16 23:59:05,585 P83727 INFO Train loss @epoch 9: 0.390474
2022-12-16 23:59:05,586 P83727 INFO Training finished.
2022-12-16 23:59:05,586 P83727 INFO Load best model: /cache/FuxiCTR/benchmark/checkpoints/DualMLP_avazu_x1/avazu_x1_0bbde04e/DualMLP_avazu_x1_010_1864e8b3.model
2022-12-16 23:59:05,640 P83727 INFO ****** Validation evaluation ******
2022-12-16 23:59:16,573 P83727 INFO [Metrics] AUC: 0.747098 - logloss: 0.395985
2022-12-16 23:59:16,722 P83727 INFO ******** Test evaluation ********
2022-12-16 23:59:16,722 P83727 INFO Loading data...
2022-12-16 23:59:16,723 P83727 INFO Loading data from h5: ../data/Avazu/avazu_x1_0bbde04e/test.h5
2022-12-16 23:59:20,276 P83727 INFO Test samples: total/8085794, blocks/1
2022-12-16 23:59:20,276 P83727 INFO Loading test data done.
2022-12-16 23:59:41,773 P83727 INFO [Metrics] AUC: 0.765685 - logloss: 0.366567
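For reference, the learning-rate schedule visible in epochs 4 through 9 (a 10x reduction each time validation AUC fails to improve, a floor at 1e-6, and early stop after early_stop_patience=2 consecutive non-improving epochs) can be paraphrased by the monitor sketch below. The decay factor and floor are inferred from this log only; the class is an illustration of the observed behavior, not FuxiCTR's scheduler code.

class PlateauMonitorSketch:
    """Tracks validation AUC (monitor_mode='max'); decays the learning rate on every
    non-improving epoch and signals early stop after `patience` consecutive misses."""
    def __init__(self, lr=1e-3, min_lr=1e-6, patience=2):
        self.best, self.misses = float("-inf"), 0
        self.lr, self.min_lr, self.patience = lr, min_lr, patience

    def update(self, metric):
        if metric > self.best:                    # improvement: save best, reset counter
            self.best, self.misses = metric, 0
            return "save_best"
        self.misses += 1                          # plateau: decay lr, floored at min_lr
        self.lr = max(self.lr * 0.1, self.min_lr)
        return "early_stop" if self.misses >= self.patience else "reduce_lr"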