test_taxinyc.py
import math, os
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from torch import optim
from torch.utils.data import DataLoader
import torch.backends.cudnn as cudnn
from tensorboardX import SummaryWriter
import matplotlib.pyplot as plt
from model.spn import ModelAttentionWithTimeaware as Model
from dataset.dataset import DatasetFactory
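# Binary mask over the 15 x 5 TaxiNYC grid (1 = evaluated cell, 0 = ignored);
# the zeroed cells presumably fall outside the served area and are excluded
# from the loss and the reported metrics.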
mask = torch.FloatTensor(
    [[1, 1, 1, 1, 0],
     [1, 1, 1, 1, 0],
     [1, 1, 1, 1, 0],
     [1, 1, 1, 1, 1],
     [1, 1, 1, 1, 1],
     [1, 1, 1, 1, 1],
     [1, 1, 1, 1, 0],
     [1, 1, 1, 1, 1],
     [1, 1, 1, 1, 0],
     [1, 1, 1, 1, 0],
     [1, 1, 1, 1, 1],
     [0, 1, 1, 1, 1],
     [0, 1, 1, 1, 1],
     [0, 1, 1, 1, 1],
     [0, 1, 1, 0, 1]]
).cuda().view(1, 1, 15, 5)
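

# Input-window configuration: len_close / len_period / len_trend appear to follow the
# closeness / period / trend decomposition used in grid-based crowd-flow prediction
# models (the trend branch is disabled here since len_trend = 0).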
class DataConfiguration:
    # Data
    name = 'TaxiNYC'
    portion = 1.  # portion of data

    len_close = 4
    len_period = 2
    len_trend = 0
    pad_forward_period = 0
    pad_back_period = 0
    pad_forward_trend = 0
    pad_back_trend = 0

    len_all_close = len_close * 1
    len_all_period = len_period * (1 + pad_back_period + pad_forward_period)
    len_all_trend = len_trend * (1 + pad_back_trend + pad_forward_trend)

    len_seq = len_all_close + len_all_period + len_all_trend
    cpt = [len_all_close, len_all_period, len_all_trend]

    interval_period = 1
    interval_trend = 7

    ext_flag = True
    ext_time_flag = True
    rm_incomplete_flag = True
    fourty_eight = True
    previous_meteorol = True

    ext_dim = 77
    dim_h = 15
    dim_w = 5
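

# Evaluate a pretrained model on the TaxiNYC test split and report masked RMSE
# (overall, inflow and outflow), de-normalised back to the original flow scale.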
def test(dconf):
    np.random.seed(777)
    torch.manual_seed(777)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

    ds_factory = DatasetFactory(dconf)
    test_ds = ds_factory.get_test_dataset()
    test_loader = DataLoader(
        dataset=test_ds,
        batch_size=16,
        shuffle=False,
        num_workers=1
    )
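
    # Build the model and restore pretrained weights; if the checkpoint is a whole
    # pickled model rather than a state_dict, fall back to loading it directly.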
    model = Model(dconf)
    try:
        model.load_state_dict(torch.load('pretrained/TaxiNYC/model'))
    except Exception:
        model = torch.load('pretrained/TaxiNYC/model')
    model = model.cuda()
    criterion = nn.MSELoss().cuda()

    model.eval()
    mse = 0.0
    mse_in = 0.0
    mse_out = 0.0
    mmn = ds_factory.ds.mmn
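
    # Accumulate masked MSE over the test set, both overall and per channel
    # (channel 0 = inflow, channel 1 = outflow), weighted by batch size.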
    with torch.no_grad():
        for i, (X, X_ext, Y, Y_ext) in enumerate(test_loader, 0):
            X = X.cuda()
            X_ext = X_ext.cuda()
            Y = Y.cuda()
            Y_ext = Y_ext.cuda()

            h = model(X, X_ext, Y_ext)

            Y = Y * mask
            h = h * mask

            loss = criterion(h, Y)
            mse += X.size()[0] * loss.item()
            mse_in += X.size()[0] * torch.mean((Y[:, 0] - h[:, 0]) * (Y[:, 0] - h[:, 0])).item()
            mse_out += X.size()[0] * torch.mean((Y[:, 1] - h[:, 1]) * (Y[:, 1] - h[:, 1])).item()
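
    # De-normalise: the data are min-max scaled to [-1, 1], so multiplying the RMSE by
    # (mmn.max - mmn.min) / 2 restores the original flow scale; m_factor is assumed to be
    # the dataset's map-scaling correction factor.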
    cnt = ds_factory.ds.X_test.shape[0]
    mse /= cnt
    rmse = math.sqrt(mse) * (mmn.max - mmn.min) / 2. * ds_factory.dataset.m_factor
    print("rmse: %.4f" % rmse)

    mse_in /= cnt
    rmse_in = math.sqrt(mse_in) * (mmn.max - mmn.min) / 2. * ds_factory.dataset.m_factor
    mse_out /= cnt
    rmse_out = math.sqrt(mse_out) * (mmn.max - mmn.min) / 2. * ds_factory.dataset.m_factor
    print("inflow rmse: %.4f outflow rmse: %.4f" % (rmse_in, rmse_out))

if __name__ == '__main__':
    dconf = DataConfiguration()
    test(dconf)