import numpy as np
import torch
import torchvision
from PIL import Image
def voc_label_indices(colormap, colormap2label): """ convert colormap (PIL image) to colormap2label (uint8 tensor). """ colormap = np.array(colormap.convert("RGB")).astype('int32') idx = ((colormap[:, :, 0] * 256 + colormap[:, :, 1]) * 256 + colormap[:, :, 2]) return colormap2label[idx]
def read_voc_images(root="./dataset/VOCdevkit/VOC2012", is_train=True, max_num=None):
    """Read the VOC2012 images and segmentation labels for one split.

    root: dataset root directory (the VOC2012 folder).
    is_train: choose the train split when True, else the val split.
    max_num: optional cap on how many image names to load.
    Returns (features, labels): two equal-length lists of RGB PIL images.
    """
    split = 'train.txt' if is_train else 'val.txt'
    txt_fname = '%s/ImageSets/Segmentation/%s' % (root, split)
    with open(txt_fname, 'r') as f:
        images = f.read().split()
    if max_num is not None:
        images = images[:min(max_num, len(images))]
    # Labels are stored as paletted PNGs; convert both to RGB up front.
    features = [Image.open('%s/JPEGImages/%s.jpg' % (root, fname)).convert("RGB")
                for fname in images]
    labels = [Image.open('%s/SegmentationClass/%s.png' % (root, fname)).convert("RGB")
              for fname in images]
    return features, labels
def voc_rand_crop(feature, label, height, width):
    """Apply one shared random crop to a (feature, label) pair of PIL images.

    The same crop window must be used for both images so pixels stay
    aligned with their labels; get_params draws it once.
    """
    top, left, crop_h, crop_w = torchvision.transforms.RandomCrop.get_params(
        feature, output_size=(height, width))
    crop = torchvision.transforms.functional.crop
    return (crop(feature, top, left, crop_h, crop_w),
            crop(label, top, left, crop_h, crop_w))
class VOCSegDataset(torch.utils.data.Dataset):
    """VOC2012 semantic-segmentation dataset with random cropping.

    Each item is a pair ``(image, target)``: a normalized 3xHxW float
    tensor and a 21xHxW one-hot encoding of the per-pixel class labels.
    """

    def __init__(self, is_train, crop_size, voc_dir, colormap2label, max_num=None):
        """
        crop_size: (h, w)
        """
        # ImageNet normalization statistics (standard for pretrained backbones).
        self.rgb_mean = np.array([0.485, 0.456, 0.406])
        self.rgb_std = np.array([0.229, 0.224, 0.225])
        self.tsf = torchvision.transforms.Compose([
            torchvision.transforms.ToTensor(),
            torchvision.transforms.Normalize(mean=self.rgb_mean,
                                             std=self.rgb_std),
        ])
        self.crop_size = crop_size
        raw_features, raw_labels = read_voc_images(root=voc_dir,
                                                   is_train=is_train,
                                                   max_num=max_num)
        # Drop images smaller than the crop window. Features and labels of
        # one sample share dimensions in VOC, so filtering the two lists
        # independently keeps them aligned.
        self.features = self.filter(raw_features)
        self.labels = self.filter(raw_labels)
        self.colormap2label = colormap2label
        print('read ' + str(len(self.features)) + ' valid examples')

    def filter(self, imgs):
        """Keep only images at least as large as the crop window."""
        min_h, min_w = self.crop_size
        # PIL's .size is (width, height), hence the swapped comparison.
        return [img for img in imgs
                if img.size[1] >= min_h and img.size[0] >= min_w]

    def __getitem__(self, idx):
        """Random-crop the pair, normalize the image, one-hot the labels."""
        feature, label = voc_rand_crop(self.features[idx], self.labels[idx],
                                       *self.crop_size)
        mask = voc_label_indices(label, self.colormap2label).numpy().astype('uint8')
        # One-hot encode: channel c is 1.0 wherever the pixel has class c.
        planes = [torch.as_tensor((mask == cls).astype('float32'))
                  for cls in range(21)]  # 21 = number of VOC classes
        target = torch.stack(planes)
        return (self.tsf(feature), target)

    def __len__(self):
        return len(self.features)
def VOC2012SegDataIter(batch_size=64, crop_size=(320, 480), num_workers=4, max_num=None,
                       voc_dir="../dataset/VOCdevkit/VOC2012"):
    """Build train/val DataLoaders for VOC2012 semantic segmentation.

    batch_size: samples per batch for both loaders.
    crop_size: (h, w) random-crop size; smaller images are filtered out.
    num_workers: DataLoader worker processes.
    max_num: optional cap on images read per split.
    voc_dir: root of the extracted VOC2012 dataset. New parameter; its
        default is the previously hard-coded path, so existing callers
        are unaffected.

    Returns (train_iter, val_iter) yielding (image, one-hot target) batches.
    """
    # RGB color of each class in the VOC label images, indexed by class id.
    VOC_COLORMAP = [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0],
                    [0, 0, 128], [128, 0, 128], [0, 128, 128], [128, 128, 128],
                    [64, 0, 0], [192, 0, 0], [64, 128, 0], [192, 128, 0],
                    [64, 0, 128], [192, 0, 128], [64, 128, 128], [192, 128, 128],
                    [0, 64, 0], [128, 64, 0], [0, 192, 0], [128, 192, 0],
                    [0, 64, 128]]
    # Class names matching VOC_COLORMAP by index; kept for reference only.
    VOC_CLASSES = ['background', 'aeroplane', 'bicycle', 'bird', 'boat',
                   'bottle', 'bus', 'car', 'cat', 'chair', 'cow',
                   'diningtable', 'dog', 'horse', 'motorbike', 'person',
                   'potted plant', 'sheep', 'sofa', 'train', 'tv/monitor']

    # Lookup table: packed RGB integer (R*256**2 + G*256 + B) -> class id.
    colormap2label = torch.zeros(256 ** 3, dtype=torch.uint8)
    for i, colormap in enumerate(VOC_COLORMAP):
        colormap2label[(colormap[0] * 256 + colormap[1]) * 256 + colormap[2]] = i

    voc_train = VOCSegDataset(True, crop_size, voc_dir, colormap2label, max_num)
    voc_val = VOCSegDataset(False, crop_size, voc_dir, colormap2label, max_num)
    # drop_last avoids a ragged final batch (some losses assume full batches).
    train_iter = torch.utils.data.DataLoader(voc_train, batch_size, shuffle=True,
                                             drop_last=True, num_workers=num_workers)
    val_iter = torch.utils.data.DataLoader(voc_val, batch_size, drop_last=True,
                                           num_workers=num_workers)
    return train_iter, val_iter