dataset.py
from io import BytesIO
import os

import lmdb
from PIL import Image
from torch.utils.data import Dataset
import torchvision


class MultiResolutionDataset(Dataset):
    """Dataset backed by an LMDB file of images encoded at a fixed resolution."""

    def __init__(self, path, transform, resolution=256):
        self.env = lmdb.open(
            path,
            max_readers=32,
            readonly=True,
            lock=False,
            readahead=False,
            meminit=False,
        )

        if not self.env:
            raise IOError('Cannot open lmdb dataset', path)

        # The number of samples is stored in the database under the key 'length'.
        with self.env.begin(write=False) as txn:
            self.length = int(txn.get('length'.encode('utf-8')).decode('utf-8'))

        self.resolution = resolution
        self.transform = transform

    def __len__(self):
        return self.length

    def __getitem__(self, index):
        # Keys have the form '<resolution>-<zero-padded index>', e.g. '256-00042'.
        with self.env.begin(write=False) as txn:
            key = f'{self.resolution}-{str(index).zfill(5)}'.encode('utf-8')
            img_bytes = txn.get(key)

        # Decode the stored bytes into a PIL image and apply the transform.
        buffer = BytesIO(img_bytes)
        img = Image.open(buffer)
        img = self.transform(img)

        return img
class ImageFolder(Dataset):
    """Dataset over all files in a flat directory of images."""

    def __init__(self, root, transform, crop=False):
        self.imgpaths = [os.path.join(root, x) for x in os.listdir(root)]
        self.transform = transform
        self.crop = crop

    def __getitem__(self, idx):
        path = self.imgpaths[idx]
        image = Image.open(path)
        width, height = image.size
        if self.crop:
            # Keep only the top 4/5 of the image (discard the bottom fifth).
            image = torchvision.transforms.functional.crop(
                image, 0, 0, int(4 / 5 * height), width
            )

        return self.transform(image)

    def __len__(self):
        return len(self.imgpaths)
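

if __name__ == '__main__':
    # Minimal usage sketch, not part of the original file. It assumes an LMDB
    # database prepared with a 'length' entry and keys of the form
    # f'{resolution}-{index:05d}', plus a flat folder of images; the paths
    # 'data/lmdb' and 'data/images' below are hypothetical placeholders.
    from torch.utils.data import DataLoader
    from torchvision import transforms

    transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(256),
        transforms.ToTensor(),
        transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),
    ])

    # LMDB-backed dataset at a fixed resolution.
    lmdb_dataset = MultiResolutionDataset('data/lmdb', transform, resolution=256)
    lmdb_loader = DataLoader(lmdb_dataset, batch_size=8, shuffle=True)

    # Plain image-folder dataset; crop=True keeps the top 4/5 of each image
    # before the transform is applied.
    folder_dataset = ImageFolder('data/images', transform, crop=True)
    folder_loader = DataLoader(folder_dataset, batch_size=8, shuffle=True)

    images = next(iter(lmdb_loader))
    print(images.shape)  # expected: torch.Size([8, 3, 256, 256])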