-
Notifications
You must be signed in to change notification settings - Fork 0
/
triplet_image_loader.py
45 lines (39 loc) · 1.87 KB
/
triplet_image_loader.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
from PIL import Image
import os
import os.path
import torch.utils.data
import torchvision.transforms as transforms
def default_image_loader(path):
    """Load the image at *path* and return it as an RGB PIL image.

    Uses ``Image.open`` as a context manager: PIL opens files lazily and
    keeps the file handle alive until the image data is loaded, so without
    the ``with`` block a dataset that loads thousands of images leaks file
    descriptors (and triggers ResourceWarning). ``convert('RGB')`` forces
    the pixel data to be read and returns a new, handle-free image, so it
    is safe to return from inside the ``with``.
    """
    with Image.open(path) as img:
        return img.convert('RGB')
class TripletImageLoader(torch.utils.data.Dataset):
    """Dataset yielding image triplets (anchor, far, close) for triplet-loss training.

    Each item is a 3-tuple of loaded (and optionally transformed) images,
    indexed by a triplets file that refers into a filenames file.
    """
    def __init__(self, base_path, filenames_filename, triplets_file_name, transform=None,
                 loader=default_image_loader):
        """ filenames_filename: A text file with each line containing the path to an image e.g.,
                images/class1/sample.jpg
            triplets_file_name: A text file with each line containing three integers, 
                where integer i refers to the i-th image in the filenames file. 
                For a line of intergers 'a b c', a triplet is defined such that image a is more 
                similar to image c than it is to image b, e.g., 
                0 2017 42 """
        self.base_path = base_path
        # Context managers close both files promptly; the originals leaked
        # the file objects returned by bare open() calls.
        with open(filenames_filename) as f:
            self.filenamelist = [line.rstrip('\n') for line in f]
        triplets = []
        with open(triplets_file_name) as f:
            for line in f:
                # Split once per line (the original split the same line
                # three times). Indices are kept as strings, exactly like
                # the original; __getitem__ converts with int().
                parts = line.split()
                triplets.append((parts[0], parts[1], parts[2]))  # anchor, far, close
        self.triplets = triplets
        self.transform = transform
        self.loader = loader

    def __getitem__(self, index):
        """Return the (anchor, far, close) images for triplet *index*,
        each passed through ``self.transform`` when one is set."""
        path1, path2, path3 = self.triplets[index]
        img1 = self.loader(os.path.join(self.base_path, self.filenamelist[int(path1)]))
        img2 = self.loader(os.path.join(self.base_path, self.filenamelist[int(path2)]))
        img3 = self.loader(os.path.join(self.base_path, self.filenamelist[int(path3)]))
        if self.transform is not None:
            img1 = self.transform(img1)
            img2 = self.transform(img2)
            img3 = self.transform(img3)
        return img1, img2, img3

    def __len__(self):
        """Number of triplets in the dataset."""
        return len(self.triplets)