-
Notifications
You must be signed in to change notification settings - Fork 11
/
geniter.py
92 lines (76 loc) · 4.1 KB
/
geniter.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
import torch
import numpy as np
import torch.utils.data as Data
def index_assignment(index, row, col, pad_length):
    """Map flat pixel indices to (row, col) coordinates in the padded image.

    Parameters
    ----------
    index : iterable of int
        Flat indices into the original (unpadded) image, row-major order.
    row : int
        Number of image rows (unused; kept for interface parity).
    col : int
        Number of image columns; divisor for the row-major decomposition.
    pad_length : int
        Padding width added on each side; shifts both coordinates.

    Returns
    -------
    dict
        Maps each sequence position to ``[padded_row, padded_col]``.
    """
    return {
        pos: [flat // col + pad_length, flat % col + pad_length]
        for pos, flat in enumerate(index)
    }
def select_patch(matrix, pos_row, pos_col, ex_len):
    """Cut a square window of side ``2*ex_len + 1`` centred at (pos_row, pos_col).

    Uses integer-array (fancy) indexing via ``np.ix_`` — equivalent to the
    chained ``matrix[rows][:, cols]`` form, including the wraparound behaviour
    of negative indices, but done in a single pass.
    """
    rows = range(pos_row - ex_len, pos_row + ex_len + 1)
    cols = range(pos_col - ex_len, pos_col + ex_len + 1)
    return matrix[np.ix_(rows, cols)]
def select_small_cubic(data_size, data_indices, whole_data, patch_length, padded_data, dimension):
    """Gather a square spatial patch around every requested pixel.

    Parameters
    ----------
    data_size : int
        Number of pixels to extract (must equal ``len(data_indices)``).
    data_indices : iterable of int
        Flat indices of the target pixels in ``whole_data``.
    whole_data : ndarray
        Original image; only its first two shape entries (rows, cols) are read.
    patch_length : int
        Half-width of each patch; patches have side ``2*patch_length + 1``.
    padded_data : ndarray
        The image padded by ``patch_length`` on each side; patches are cut here.
    dimension : int
        Number of spectral bands (last axis of the output).

    Returns
    -------
    ndarray of shape (data_size, side, side, dimension).
    """
    side = 2 * patch_length + 1
    cubes = np.zeros((data_size, side, side, dimension))
    positions = index_assignment(data_indices, whole_data.shape[0],
                                 whole_data.shape[1], patch_length)
    # Keys are 0..n-1 in insertion order, so this matches an index loop.
    for idx, (r, c) in positions.items():
        cubes[idx] = select_patch(padded_data, r, c, patch_length)
    return cubes
def generate_iter(TRAIN_SIZE, train_indices, TEST_SIZE, test_indices, TOTAL_SIZE, total_indices, VAL_SIZE,
                  whole_data, PATCH_LENGTH, padded_data, INPUT_DIMENSION, batch_size, gt):
    """Build train / validation / test / full-image DataLoaders of patch cubes.

    Extracts a spatial patch cube per pixel (via ``select_small_cubic``),
    carves the validation split off the tail of the test set, and wraps each
    split in a ``torch.utils.data.DataLoader``.

    Parameters
    ----------
    TRAIN_SIZE, TEST_SIZE, TOTAL_SIZE : int
        Sample counts for the respective index arrays.
    train_indices, test_indices, total_indices : array-like of int
        Flat pixel indices for each split.
    VAL_SIZE : int
        Number of samples taken from the END of the test split for validation.
    whole_data : ndarray
        Original image (rows, cols, bands).
    PATCH_LENGTH : int
        Half-width of each extracted patch.
    padded_data : ndarray
        Image padded by PATCH_LENGTH on each side.
    INPUT_DIMENSION : int
        Number of spectral bands.
    batch_size : int
        Mini-batch size for every loader.
    gt : ndarray
        Flat ground-truth label map, 1-based class ids.

    Returns
    -------
    (train_iter, valida_iter, test_iter, all_iter) : DataLoaders
        Train and validation loaders shuffle; test and all-data do not.
    """
    # Labels are stored 1-based in the ground-truth map; shift to 0-based.
    gt_all = gt[total_indices] - 1
    y_train = gt[train_indices] - 1
    y_test = gt[test_indices] - 1

    all_data = select_small_cubic(TOTAL_SIZE, total_indices, whole_data,
                                  PATCH_LENGTH, padded_data, INPUT_DIMENSION)
    train_data = select_small_cubic(TRAIN_SIZE, train_indices, whole_data,
                                    PATCH_LENGTH, padded_data, INPUT_DIMENSION)
    print(train_data.shape)
    test_data = select_small_cubic(TEST_SIZE, test_indices, whole_data,
                                   PATCH_LENGTH, padded_data, INPUT_DIMENSION)

    x_train = train_data.reshape(train_data.shape[0], train_data.shape[1],
                                 train_data.shape[2], INPUT_DIMENSION)
    x_test_all = test_data.reshape(test_data.shape[0], test_data.shape[1],
                                   test_data.shape[2], INPUT_DIMENSION)
    # BUG FIX: the original called all_data.reshape(...) and discarded the
    # result — ndarray.reshape returns a new array, it does not act in place.
    all_data = all_data.reshape(all_data.shape[0], all_data.shape[1],
                                all_data.shape[2], INPUT_DIMENSION)

    # Validation split comes from the tail of the test set.
    x_val, y_val = x_test_all[-VAL_SIZE:], y_test[-VAL_SIZE:]
    x_test, y_test = x_test_all[:-VAL_SIZE], y_test[:-VAL_SIZE]

    def _make_loader(x, y, shuffle):
        # unsqueeze(1) adds a leading channel axis: (N, H, W, C) -> (N, 1, H, W, C).
        xs = torch.from_numpy(x).type(torch.FloatTensor).unsqueeze(1)
        ys = torch.from_numpy(y).type(torch.FloatTensor)
        return Data.DataLoader(
            dataset=Data.TensorDataset(xs, ys),
            batch_size=batch_size,
            shuffle=shuffle,
            num_workers=0,
        )

    train_iter = _make_loader(x_train, y_train, shuffle=True)
    valida_iter = _make_loader(x_val, y_val, shuffle=True)
    test_iter = _make_loader(x_test, y_test, shuffle=False)
    all_iter = _make_loader(all_data, gt_all, shuffle=False)
    return train_iter, valida_iter, test_iter, all_iter