-
Notifications
You must be signed in to change notification settings - Fork 37
/
celeba.py
64 lines (50 loc) · 1.95 KB
/
celeba.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
import torch
import torch.utils.data as data
from PIL import Image
import os
import os.path
def pil_loader(path):
    """Load the image at *path* with PIL and return it converted to RGB.

    The file is opened explicitly so the handle is closed by the context
    manager, avoiding a ResourceWarning
    (https://github.com/python-pillow/Pillow/issues/835).
    """
    with open(path, 'rb') as handle:
        image = Image.open(handle)
        return image.convert('RGB')
def accimage_loader(path):
    """Load the image at *path* with accimage, falling back to PIL.

    accimage raises IOError on images it cannot decode; in that case the
    slower but more tolerant PIL loader is used instead.
    """
    import accimage
    try:
        image = accimage.Image(path)
    except IOError:
        # Potentially a decoding problem — let PIL try the same file.
        image = pil_loader(path)
    return image
def default_loader(path):
    """Load *path* using torchvision's configured image backend.

    Dispatches to :func:`accimage_loader` when the backend is 'accimage',
    otherwise to :func:`pil_loader`.
    """
    from torchvision import get_image_backend

    if get_image_backend() == 'accimage':
        return accimage_loader(path)
    return pil_loader(path)
class CelebA(data.Dataset):
    """CelebA face-attribute dataset.

    Reads an annotation file where each line holds an image filename
    followed by 40 integer attribute labels, and serves
    ``(image, attribute_tensor)`` pairs. Images are loaded from the
    ``img_align_celeba_png`` directory under *root*.
    """

    def __init__(self, root, ann_file, transform=None, target_transform=None, loader=default_loader):
        """Parse the annotation file and build the sample index.

        Args:
            root: Dataset root directory; must contain *ann_file* and the
                ``img_align_celeba_png`` image folder.
            ann_file: Annotation file name, relative to *root*; one image
                name plus 40 attribute values per line (41 tokens).
            transform: Optional callable applied to the loaded image.
            target_transform: Optional callable applied to the target tensor.
            loader: Callable that loads an image given its path.

        Raises:
            RuntimeError: If any annotation line does not have exactly
                41 whitespace-separated tokens.
        """
        images = []
        targets = []
        # Use a context manager so the annotation file handle is always
        # closed (the original bare `open()` in the for-loop leaked it).
        with open(os.path.join(root, ann_file), 'r') as f:
            for line in f:
                sample = line.split()
                if len(sample) != 41:
                    raise RuntimeError("# Annotated face attributes of CelebA dataset should not be different from 40")
                images.append(sample[0])
                targets.append([int(i) for i in sample[1:]])
        self.images = [os.path.join(root, 'img_align_celeba_png', img) for img in images]
        self.targets = targets
        self.transform = transform
        self.target_transform = target_transform
        self.loader = loader

    def __getitem__(self, index):
        """Return the ``(sample, target)`` pair at *index*.

        The image is loaded via ``self.loader`` and the 40 attribute labels
        are returned as a ``torch.LongTensor``; both are passed through the
        respective optional transforms.
        """
        path = self.images[index]
        sample = self.loader(path)
        target = self.targets[index]
        target = torch.LongTensor(target)
        if self.transform is not None:
            sample = self.transform(sample)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return sample, target

    def __len__(self):
        """Return the number of annotated samples."""
        return len(self.images)