The following code examples, extracted from open-source Python projects, illustrate how to use torchvision.transforms.Scale().
def __call__(self, img):
    """Randomly crop a near-full-area patch (90-100% of the image, mild
    aspect-ratio jitter) and resize it to (self.size, self.size).

    Falls back to a plain scale + center crop if no valid crop is found
    within 10 attempts.
    """
    for attempt in range(10):
        area = img.size[0] * img.size[1]
        # Keep 90-100% of the original area; jitter aspect ratio in [7/8, 8/7].
        target_area = random.uniform(0.9, 1.) * area
        aspect_ratio = random.uniform(7. / 8, 8. / 7)
        w = int(round(math.sqrt(target_area * aspect_ratio)))
        h = int(round(math.sqrt(target_area / aspect_ratio)))
        # Randomly swap w/h so both orientations are sampled.
        if random.random() < 0.5:
            w, h = h, w
        if w <= img.size[0] and h <= img.size[1]:
            x1 = random.randint(0, img.size[0] - w)
            y1 = random.randint(0, img.size[1] - h)
            img = img.crop((x1, y1, x1 + w, y1 + h))
            assert (img.size == (w, h))
            return img.resize((self.size, self.size), self.interpolation)
    # Fallback
    scale = Scale(self.size, interpolation=self.interpolation)
    crop = CenterCrop(self.size)
    return crop(scale(img))
def get_transform(opt):
    """Assemble the preprocessing pipeline selected by opt.resize_or_crop."""
    ops = []
    mode = opt.resize_or_crop
    if mode == 'resize_and_crop':
        ops.append(transforms.Scale([opt.loadSizeX, opt.loadSizeY], Image.BICUBIC))
        ops.append(transforms.RandomCrop(opt.fineSize))
    elif mode == 'crop':
        ops.append(transforms.RandomCrop(opt.fineSize))
    elif mode == 'scale_width':
        ops.append(transforms.Lambda(lambda img: __scale_width(img, opt.fineSize)))
    elif mode == 'scale_width_and_crop':
        ops.append(transforms.Lambda(lambda img: __scale_width(img, opt.loadSizeX)))
        ops.append(transforms.RandomCrop(opt.fineSize))
    # Horizontal flip only during training, unless explicitly disabled.
    if opt.isTrain and not opt.no_flip:
        ops.append(transforms.RandomHorizontalFlip())
    ops.extend([transforms.ToTensor(),
                transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
    return transforms.Compose(ops)
def get_loader(config):
    """Builds and returns Dataloader for MNIST and SVHN dataset."""
    preprocess = transforms.Compose([
        transforms.Scale(config.image_size),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])

    svhn = datasets.SVHN(root=config.svhn_path, download=True,
                         transform=preprocess)
    mnist = datasets.MNIST(root=config.mnist_path, download=True,
                           transform=preprocess)

    svhn_loader = torch.utils.data.DataLoader(dataset=svhn,
                                              batch_size=config.batch_size,
                                              shuffle=True,
                                              num_workers=config.num_workers)
    mnist_loader = torch.utils.data.DataLoader(dataset=mnist,
                                               batch_size=config.batch_size,
                                               shuffle=True,
                                               num_workers=config.num_workers)
    return svhn_loader, mnist_loader
def __call__(self, img):
    """Randomly crop a patch covering 70-98% of the image area (aspect ratio
    jittered in [5/8, 8/5]) and resize it to (self.size, self.size).

    Falls back to a plain scale + center crop if no valid crop is found
    within 10 attempts.
    """
    for attempt in range(10):
        area = img.size[0] * img.size[1]
        # Keep 70-98% of the original area.
        target_area = random.uniform(0.70, 0.98) * area
        aspect_ratio = random.uniform(5. / 8, 8. / 5)
        w = int(round(math.sqrt(target_area * aspect_ratio)))
        h = int(round(math.sqrt(target_area / aspect_ratio)))
        # Randomly swap w/h so both orientations are sampled.
        if random.random() < 0.5:
            w, h = h, w
        if w <= img.size[0] and h <= img.size[1]:
            x1 = random.randint(0, img.size[0] - w)
            y1 = random.randint(0, img.size[1] - h)
            img = img.crop((x1, y1, x1 + w, y1 + h))
            assert (img.size == (w, h))
            return img.resize((self.size, self.size), self.interpolation)
    # Fallback
    scale = Scale(self.size, interpolation=self.interpolation)
    crop = CenterCrop(self.size)
    return crop(scale(img))
def test_getitem(self):
    """Preprocessor over the VIPeR train split yields (img, pid, camid)
    tuples with 3x224x224 image tensors."""
    import torchvision.transforms as t
    from reid.datasets.viper import VIPeR
    from reid.utils.data.preprocessor import Preprocessor

    root, split_id, num_val = '/tmp/open-reid/viper', 0, 100
    dataset = VIPeR(root, split_id=split_id, num_val=num_val, download=True)
    pipeline = t.Compose([
        t.Scale(256),
        t.CenterCrop(224),
        t.ToTensor(),
        t.Normalize(mean=[0.485, 0.456, 0.406],
                    std=[0.229, 0.224, 0.225]),
    ])
    preproc = Preprocessor(dataset.train, root=dataset.images_dir,
                           transform=pipeline)
    self.assertEquals(len(preproc), len(dataset.train))
    img, pid, camid = preproc[0]
    self.assertEquals(img.size(), (3, 224, 224))
def load_data(self):
    """Build shuffled DataLoaders for the synthetic and real image folders
    configured in `cfg` and store them on the instance."""
    print('=' * 50)
    print('Loading data...')
    # NOTE(review): `transforms.ImageOps` is not part of torchvision's public
    # API in current releases; this presumably relied on an old torchvision
    # that re-exported PIL.ImageOps -- confirm before upgrading torchvision.
    transform = transforms.Compose([
        transforms.ImageOps.grayscale,
        transforms.Scale((cfg.img_width, cfg.img_height)),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

    # Synthetic training images.
    syn_train_folder = torchvision.datasets.ImageFolder(root=cfg.syn_path, transform=transform)
    # print(syn_train_folder)
    self.syn_train_loader = Data.DataLoader(syn_train_folder, batch_size=cfg.batch_size,
                                            shuffle=True, pin_memory=True)
    print('syn_train_batch %d' % len(self.syn_train_loader))

    # Real images.
    real_folder = torchvision.datasets.ImageFolder(root=cfg.real_path, transform=transform)
    # real_folder.imgs = real_folder.imgs[:2000]
    self.real_loader = Data.DataLoader(real_folder, batch_size=cfg.batch_size,
                                       shuffle=True, pin_memory=True)
    print('real_batch %d' % len(self.real_loader))
def validate(self):
    """Build and return the validation DataLoader over spatial (RGB) frames.

    Note: this is Python 2 code (print statements).
    """
    validation_set = spatial_dataset(dic=self.dic_testing, root_dir=self.data_path, mode='val', transform = transforms.Compose([
            transforms.Scale([224,224]),
            transforms.ToTensor(),
            # Standard ImageNet normalization.
            transforms.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225])
            ]))
    print '==> Validation data :',len(validation_set),'frames'
    print validation_set[1][1].size()
    # No shuffling for validation.
    val_loader = DataLoader(
        dataset=validation_set,
        batch_size=self.BATCH_SIZE,
        shuffle=False,
        num_workers=self.num_workers)
    return val_loader
def train(self):
    """Build and return the shuffled training DataLoader over optical-flow
    (motion) clips.

    Note: this is Python 2 code (print statements).
    """
    training_set = motion_dataset(dic=self.dic_video_train, in_channel=self.in_channel, root_dir=self.data_path, mode='train', transform = transforms.Compose([
        transforms.Scale([224,224]),
        transforms.ToTensor(),
        ]))
    print '==> Training data :',len(training_set),' videos',training_set[1][0].size()

    train_loader = DataLoader(
        dataset=training_set,
        batch_size=self.BATCH_SIZE,
        shuffle=True,
        num_workers=self.num_workers,
        pin_memory=True
        )
    return train_loader
def val(self):
    """Build and return the validation DataLoader over optical-flow
    (motion) frames.

    Note: this is Python 2 code (print statements).
    """
    validation_set = motion_dataset(dic= self.dic_test_idx, in_channel=self.in_channel, root_dir=self.data_path , mode ='val', transform = transforms.Compose([
        transforms.Scale([224,224]),
        transforms.ToTensor(),
        ]))
    print '==> Validation data :',len(validation_set),' frames',validation_set[1][1].size()
    #print validation_set[1]
    # No shuffling for validation.
    val_loader = DataLoader(
        dataset=validation_set,
        batch_size=self.BATCH_SIZE,
        shuffle=False,
        num_workers=self.num_workers)
    return val_loader
def imagenet_transform(scale_size=256, input_size=224, train=True, allow_var_size=False):
    """Return the standard ImageNet preprocessing pipeline.

    Training uses random crop + horizontal flip; evaluation uses a center
    crop unless allow_var_size is set, in which case only scaling is done.
    """
    normalize = {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225]}
    if train:
        steps = [transforms.Scale(scale_size),
                 transforms.RandomCrop(input_size),
                 transforms.RandomHorizontalFlip()]
    elif allow_var_size:
        steps = [transforms.Scale(scale_size)]
    else:
        steps = [transforms.Scale(scale_size),
                 transforms.CenterCrop(input_size)]
    steps += [transforms.ToTensor(), transforms.Normalize(**normalize)]
    return transforms.Compose(steps)
def get_transform(opt):
    """Assemble the preprocessing pipeline selected by opt.resize_or_crop."""
    ops = []
    mode = opt.resize_or_crop
    if mode == 'resize_and_crop':
        ops.append(transforms.Scale([opt.loadSize, opt.loadSize], Image.BICUBIC))
        ops.append(transforms.RandomCrop(opt.fineSize))
    elif mode == 'crop':
        ops.append(transforms.RandomCrop(opt.fineSize))
    elif mode == 'scale_width':
        ops.append(transforms.Lambda(lambda img: __scale_width(img, opt.fineSize)))
    elif mode == 'scale_width_and_crop':
        ops.append(transforms.Lambda(lambda img: __scale_width(img, opt.loadSize)))
        ops.append(transforms.RandomCrop(opt.fineSize))
    # Horizontal flip only during training, unless explicitly disabled.
    if opt.isTrain and not opt.no_flip:
        ops.append(transforms.RandomHorizontalFlip())
    ops.extend([transforms.ToTensor(),
                transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
    return transforms.Compose(ops)
def load_image_for_prediction(opt, image_path):
    """
    Load image for prediction, preprocessing it the same way as training,
    and return the dict layout the model expects.

    :param opt: options object providing loadSize and fineSize
    :param image_path: path to the image file
    :return: dict with 'A'/'B' tensors of shape (1, C, H, W) and their paths
    """
    # Force 3-channel RGB: palette, grayscale or RGBA files would otherwise
    # yield a tensor whose channel count does not match the 3-channel
    # Normalize below and crash at transform time.
    image = Image.open(image_path).convert('RGB')
    transformations = transforms.Compose([
        transforms.Scale(opt.loadSize),
        transforms.RandomCrop(opt.fineSize),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
    image_tensor = transformations(image).float()
    image_tensor.unsqueeze_(0)  # add the batch dimension in place
    return {'A': image_tensor, 'A_paths': image_path,
            'B': image_tensor, 'B_paths': image_path}
def LSUN_loader(root, image_size, classes=['bedroom'], normalize=True):
    """
    Function to load torchvision dataset object based on just image size
    Args:
        root = If your dataset is downloaded and ready to use, mention the location of this folder. Else, the dataset will be downloaded to this location
        image_size = Size of every image
        classes = Default class is 'bedroom'. Other available classes are:
            'bridge', 'church_outdoor', 'classroom', 'conference_room',
            'dining_room', 'kitchen', 'living_room', 'restaurant', 'tower'
        normalize = Requirement to normalize the image. Default is true
    """
    transformations = [transforms.Scale(image_size),
                       transforms.CenterCrop(image_size),
                       transforms.ToTensor()]
    if normalize:  # idiomatic truth test instead of `== True`
        transformations.append(transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)))
    # Bug fix: the original looped `for c in classes: c = c + '_train'`,
    # which only rebound the loop variable -- `classes` itself was never
    # suffixed -- and rebuilt the dataset on every iteration. Build the
    # suffixed list explicitly and construct the dataset once.
    lsun_data = dset.LSUN(db_path=root,
                          classes=[c + '_train' for c in classes],
                          transform=transforms.Compose(transformations))
    return lsun_data
def transform(is_train=True, normalize=True):
    """ Returns a transform object """
    steps = [Scale(256)]
    # Random crop + flip for training; deterministic center crop otherwise.
    steps.append(RandomCrop(224) if is_train else CenterCrop(224))
    if is_train:
        steps.append(RandomHorizontalFlip())
    steps.append(ToTensor())
    if normalize:
        steps.append(Normalize(mean=[0.485, 0.456, 0.406],
                               std=[0.229, 0.224, 0.225]))
    return Compose(steps)
def __init__(self, crop_size = 128, y_offset = 15, flip = False):
    """Store crop parameters and build the post-crop pipeline
    (optional flip, scale to 224, tensor conversion, ImageNet norm)."""
    self.crop_size = crop_size
    self.y_offset = y_offset
    self.flip = flip
    tail = [
        transforms.Scale(size = 224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ]
    if self.flip:
        self.post_transform = transforms.Compose(
            [transforms.RandomHorizontalFlip()] + tail)
    else:
        self.post_transform = transforms.Compose(tail)
def transform_input(crop_size, upscale_factor):
    """Return the transform producing the LR (low-resolution) input:
    the target image downscaled by `upscale_factor`."""
    return Compose([Scale(crop_size // upscale_factor)])
def load_data(resize):
    """Build train/val DataLoaders for the PlantVillage ImageFolder dataset.

    :param resize: sequence of candidate sizes; the maximum is used as the
        crop size.
    :return: (train_loader, val_loader)

    Note: `batch_size` is read from module scope.
    """
    data_transforms = {
        'train': transforms.Compose([
            transforms.RandomSizedCrop(max(resize)),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),
        'val': transforms.Compose([
            # Higher scale-up for inception
            transforms.Scale(int(max(resize)/224*256)),
            transforms.CenterCrop(max(resize)),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),
    }
    data_dir = 'PlantVillage'
    dsets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x])
             for x in ['train', 'val']}
    dset_loaders = {x: torch.utils.data.DataLoader(dsets[x],
                                                   batch_size=batch_size,
                                                   shuffle=True)
                    for x in ['train', 'val']}
    # Removed unused locals `dset_sizes` and `dset_classes` -- they were
    # computed but never used or returned.
    return dset_loaders['train'], dset_loaders['val']
def __init__(self, env):
    """Wrap a CartPole gym env and prepare the 40-px screen resize pipeline."""
    super(CartPoleWrapper, self).__init__()
    self.env = env.unwrapped
    self.resize = T.Compose([
        T.ToPILImage(),
        T.Scale(40, interpolation=Image.CUBIC),
        T.ToTensor(),
    ])
    self.screen_width = 600
    self.action_space = self.env.action_space
def scale_crop(input_size, scale_size=None, normalize=__imagenet_stats):
    """Evaluation transform: optional scale, center crop, tensor, normalize."""
    pipeline = [
        transforms.CenterCrop(input_size),
        transforms.ToTensor(),
        transforms.Normalize(**normalize),
    ]
    # Prepend a scale step only when the sizes differ.
    if scale_size != input_size:
        pipeline.insert(0, transforms.Scale(scale_size))
    return transforms.Compose(pipeline)
def scale_random_crop(input_size, scale_size=None, normalize=__imagenet_stats):
    """Training transform: optional scale, random crop, tensor, normalize.

    Bug fix: the original built the Compose but never returned it, so this
    function always returned None (compare the sibling scale_random_crop
    variant, which does return).
    """
    t_list = [
        transforms.RandomCrop(input_size),
        transforms.ToTensor(),
        transforms.Normalize(**normalize),
    ]
    if scale_size != input_size:
        t_list = [transforms.Scale(scale_size)] + t_list
    return transforms.Compose(t_list)
def CreateDataLoader(opt):
    """Build the paired color/sketch folder DataLoader: full-size color (C),
    quarter-size random-crop variant (V), and full-size sketch (S)."""
    random.seed(opt.manualSeed)
    norm = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    CTrans = transforms.Compose([
        transforms.Scale(opt.imageSize, Image.BICUBIC),
        transforms.ToTensor(),
        norm,
    ])
    VTrans = transforms.Compose([
        RandomSizedCrop(opt.imageSize // 4, Image.BICUBIC),
        transforms.ToTensor(),
        norm,
    ])
    STrans = transforms.Compose([
        transforms.Scale(opt.imageSize, Image.BICUBIC),
        transforms.ToTensor(),
        norm,
    ])
    dataset = ImageFolder(rootC=opt.datarootC,
                          rootS=opt.datarootS,
                          transform=CTrans,
                          vtransform=VTrans,
                          stransform=STrans)
    assert dataset
    return data.DataLoader(dataset,
                           batch_size=opt.batchSize,
                           shuffle=True,
                           num_workers=int(opt.workers),
                           drop_last=True)
def CreateDataLoader(opt):
    """Build the paired color/sketch folder DataLoader; here the V variant
    is a random crop at the full image size."""
    random.seed(opt.manualSeed)
    norm = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    CTrans = transforms.Compose([
        transforms.Scale(opt.imageSize, Image.BICUBIC),
        transforms.ToTensor(),
        norm,
    ])
    VTrans = transforms.Compose([
        RandomSizedCrop(opt.imageSize, Image.BICUBIC),
        transforms.ToTensor(),
        norm,
    ])
    STrans = transforms.Compose([
        transforms.Scale(opt.imageSize, Image.BICUBIC),
        transforms.ToTensor(),
        norm,
    ])
    dataset = ImageFolder(rootC=opt.datarootC,
                          rootS=opt.datarootS,
                          transform=CTrans,
                          vtransform=VTrans,
                          stransform=STrans)
    assert dataset
    return data.DataLoader(dataset,
                           batch_size=opt.batchSize,
                           shuffle=True,
                           num_workers=int(opt.workers),
                           drop_last=True)
def CreateDataLoader(opt):
    """Build the paired color/sketch folder DataLoader; the sketch (S)
    pipeline additionally applies a random brightness-style jitter."""
    random.seed(opt.manualSeed)
    norm = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    CTrans = transforms.Compose([
        transforms.Scale(opt.imageSize, Image.BICUBIC),
        transforms.ToTensor(),
        norm,
    ])
    VTrans = transforms.Compose([
        RandomSizedCrop(opt.imageSize // 4, Image.BICUBIC),
        transforms.ToTensor(),
        norm,
    ])

    def jitter(x):
        # Blend the tensor toward white by a random factor in [0.7, 1].
        ran = random.uniform(0.7, 1)
        return x * ran + 1 - ran

    STrans = transforms.Compose([
        transforms.Scale(opt.imageSize, Image.BICUBIC),
        transforms.ToTensor(),
        transforms.Lambda(jitter),
        norm,
    ])
    dataset = ImageFolder(rootC=opt.datarootC,
                          rootS=opt.datarootS,
                          transform=CTrans,
                          vtransform=VTrans,
                          stransform=STrans)
    assert dataset
    return data.DataLoader(dataset,
                           batch_size=opt.batchSize,
                           shuffle=True,
                           num_workers=int(opt.workers),
                           drop_last=True)
def get_loader(config):
    """Builds and returns train/test Dataloaders for MNIST and SVHN."""
    preprocess = transforms.Compose([
        transforms.Scale(config.image_size),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])

    svhn = datasets.SVHN(root=config.svhn_path, download=True,
                         transform=preprocess, split='train')
    mnist = datasets.MNIST(root=config.mnist_path, download=True,
                           transform=preprocess, train=True)
    svhn_test = datasets.SVHN(root=config.svhn_path, download=True,
                              transform=preprocess, split='test')
    mnist_test = datasets.MNIST(root=config.mnist_path, download=True,
                                transform=preprocess, train=False)

    def make_loader(ds, shuffle):
        # Train loaders shuffle; test loaders keep dataset order.
        return torch.utils.data.DataLoader(dataset=ds,
                                           batch_size=config.batch_size,
                                           shuffle=shuffle,
                                           num_workers=config.num_workers)

    return (make_loader(svhn, True),
            make_loader(mnist, True),
            make_loader(svhn_test, False),
            make_loader(mnist_test, False))
def initialize(self, opt):
    """Create paired A/B ImageFolder loaders for the current phase and
    wrap them in a PairedData iterator."""
    BaseDataLoader.initialize(self, opt)
    preprocess = transforms.Compose([
        transforms.Scale(opt.loadSize),
        transforms.RandomCrop(opt.fineSize),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])

    def make_loader(suffix):
        # One folder per domain: <dataroot>/<phase>A and <dataroot>/<phase>B.
        folder = ImageFolder(root=opt.dataroot + '/' + opt.phase + suffix,
                             transform=preprocess, return_paths=True)
        loader = torch.utils.data.DataLoader(
            folder,
            batch_size=self.opt.batchSize,
            shuffle=not self.opt.serial_batches,
            num_workers=int(self.opt.nThreads))
        return folder, loader

    self.dataset_A, data_loader_A = make_loader('A')
    self.dataset_B, data_loader_B = make_loader('B')
    flip = opt.isTrain and not opt.no_flip
    self.paired_data = PairedData(data_loader_A, data_loader_B,
                                  self.opt.max_dataset_size, flip)
def initialize(self, opt):
    """Create the single-folder loader for the current phase and wrap it
    in a PairedData iterator."""
    BaseDataLoader.initialize(self, opt)
    self.fineSize = opt.fineSize
    preprocess = transforms.Compose([
        # TODO: Scale
        transforms.Scale(opt.loadSize),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])
    dataset = ImageFolder(root=opt.dataroot + '/' + opt.phase,
                          transform=preprocess, return_paths=True)
    data_loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=self.opt.batchSize,
        shuffle=not self.opt.serial_batches,
        num_workers=int(self.opt.nThreads))
    self.dataset = dataset
    flip = opt.isTrain and not opt.no_flip
    self.paired_data = PairedData(data_loader, opt.fineSize,
                                  opt.max_dataset_size, flip)
def toTensor(self, img):
    """Open the image at path `img` and encode it as a BGR,
    mean-subtracted tensor scaled to the 0..255 range."""
    pipeline = transforms.Compose([
        transforms.Scale(self.img_size),
        transforms.ToTensor(),
        # RGB -> BGR channel swap.
        transforms.Lambda(lambda x: x[torch.LongTensor([2,1,0])]),
        transforms.Normalize(mean=[0.40760392, 0.45795686, 0.48501961],
                             std=[1,1,1]),
        # Rescale to 0..255 (in place).
        transforms.Lambda(lambda x: x.mul_(255)),
    ])
    return pipeline(Image.open(img))
def __init__(self, path, img_size, batch_size, is_cuda):
    """Index the image files under `path` and prepare the
    scale/random-crop/tensor encoding pipeline."""
    self._img_files = os.listdir(path)
    self._path = path
    self._is_cuda = is_cuda
    self._step = 0
    self._batch_size = batch_size
    self.sents_size = len(self._img_files)
    # Number of full batches available.
    self._stop_step = self.sents_size // batch_size
    self._encode = transforms.Compose([
        transforms.Scale(img_size),
        transforms.RandomCrop(img_size),
        transforms.ToTensor(),
    ])
def get_loader(image_path, image_size, batch_size, num_workers=2):
    """Builds and returns Dataloader."""
    preprocess = transforms.Compose([
        transforms.Scale(image_size),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])
    dataset = ImageFolder(image_path, preprocess)
    return data.DataLoader(dataset=dataset,
                           batch_size=batch_size,
                           shuffle=True,
                           num_workers=num_workers)
def returnTF():
    """Return the 224x224 ImageNet-normalized image transformer."""
    return trn.Compose([
        trn.Scale((224,224)),
        trn.ToTensor(),
        trn.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ])
def scale_random_crop(input_size, scale_size=None, normalize=__imagenet_stats):
    """Training transform: optional scale, random crop, horizontal flip,
    tensor conversion and normalization."""
    pipeline = [
        transforms.RandomCrop(input_size),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(**normalize),
    ]
    # Prepend a scale step only when the sizes differ.
    if scale_size != input_size:
        pipeline.insert(0, transforms.Scale(scale_size))
    return transforms.Compose(pipeline)
def default_inception_transform(img_size):
    """Scale and center-crop to img_size, then apply LeNormalize."""
    return transforms.Compose([
        transforms.Scale(img_size),
        transforms.CenterCrop(img_size),
        transforms.ToTensor(),
        LeNormalize(),
    ])
def default_transform(size):
    """Scale and center-crop to `size`, then normalize with the standard
    ResNet/ImageNet statistics."""
    return transforms.Compose([
        transforms.Scale(size),
        transforms.CenterCrop(size),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],  # resnet imagnet
                             std=[0.229, 0.224, 0.225]),
    ])
def img_transform(crop_size, upscale_factor=1):
    """Downscale by `upscale_factor`, center-crop, and convert to tensor."""
    target = crop_size // upscale_factor
    return transforms.Compose([
        transforms.Scale(target),
        transforms.CenterCrop(target),
        transforms.ToTensor(),
    ])
def MNIST_loader(root, image_size, normalize=True):
    """
    Function to load torchvision dataset object based on just image size
    Args:
        root = If your dataset is downloaded and ready to use, mention the location of this folder. Else, the dataset will be downloaded to this location
        image_size = Size of every image
        normalize = Requirement to normalize the image. Default is true
    """
    transformations = [transforms.Scale(image_size), transforms.ToTensor()]
    if normalize:  # idiomatic truth test instead of `== True` (PEP 8 E712)
        # Single-channel statistics: MNIST is grayscale.
        transformations.append(transforms.Normalize((0.5, ), (0.5, )))
    mnist_data = dset.MNIST(root=root, download=True,
                            transform=transforms.Compose(transformations))
    return mnist_data
def CIFAR10_loader(root, image_size, normalize=True):
    """
    Function to load torchvision dataset object based on just image size
    Args:
        root = If your dataset is downloaded and ready to use, mention the location of this folder. Else, the dataset will be downloaded to this location
        image_size = Size of every image
        normalize = Requirement to normalize the image. Default is true
    """
    transformations = [transforms.Scale(image_size), transforms.ToTensor()]
    if normalize:  # idiomatic truth test instead of `== True` (PEP 8 E712)
        transformations.append(transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)))
    cifar10_data = dset.CIFAR10(root=root, download=True,
                                transform=transforms.Compose(transformations))
    return cifar10_data
def CUB200_2010_loader(root, image_size, normalize=True):
    """
    Function to load torchvision dataset object based on just image size
    Args:
        root = If your dataset is downloaded and ready to use, mention the location of this folder. Else, the dataset will be downloaded to this location
        image_size = Size of every image
        normalize = Requirement to normalize the image. Default is true
    """
    transformations = [transforms.Scale(image_size), transforms.ToTensor()]
    if normalize:  # idiomatic truth test instead of `== True` (PEP 8 E712)
        transformations.append(transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)))
    cub200_2010_data = CUB2002010(root=root, download=True,
                                  transform=transforms.Compose(transformations))
    return cub200_2010_data
def FASHIONMNIST_loader(root, image_size, normalize=True):
    """
    Function to load torchvision dataset object based on just image size
    Args:
        root = If your dataset is downloaded and ready to use, mention the location of this folder. Else, the dataset will be downloaded to this location
        image_size = Size of every image
        normalize = Requirement to normalize the image. Default is true
    """
    transformations = [transforms.Scale(image_size), transforms.ToTensor()]
    if normalize:  # idiomatic truth test instead of `== True` (PEP 8 E712)
        # Single-channel statistics: FashionMNIST is grayscale.
        transformations.append(transforms.Normalize((0.5, ), (0.5, )))
    fash_mnist_data = dset.FashionMNIST(root=root, download=True,
                                        transform=transforms.Compose(transformations))
    return fash_mnist_data
def __init__(self, opt):
    """Build a `get_data(k)` accessor over the configured dataset
    (cifar10, an image folder, or LSUN) and load the train index.

    :param opt: options object providing crop/scale sizes, dataroot,
        dataset name and (for LSUN) lsun_class.
    """
    transform_list = []
    if (opt.crop_height > 0) and (opt.crop_width > 0):
        # Bug fix: the original called CenterCrop(opt.crop_height, crop_width),
        # which referenced an undefined name (`crop_width`) and passed two
        # positional arguments; CenterCrop takes a single int or (h, w) tuple.
        transform_list.append(transforms.CenterCrop((opt.crop_height, opt.crop_width)))
    elif opt.crop_size > 0:
        transform_list.append(transforms.CenterCrop(opt.crop_size))
    transform_list.append(transforms.Scale(opt.image_size))
    transform_list.append(transforms.CenterCrop(opt.image_size))
    transform_list.append(transforms.ToTensor())
    if opt.dataset == 'cifar10':
        dataset1 = datasets.CIFAR10(root = opt.dataroot, download = True,
            transform = transforms.Compose(transform_list))
        dataset2 = datasets.CIFAR10(root = opt.dataroot, train = False,
            transform = transforms.Compose(transform_list))

        def get_data(k):
            # Index into the train set first, then continue into the test set.
            if k < len(dataset1):
                return dataset1[k][0]
            else:
                return dataset2[k - len(dataset1)][0]
    else:
        if opt.dataset in ['imagenet', 'folder', 'lfw']:
            dataset = datasets.ImageFolder(root = opt.dataroot,
                transform = transforms.Compose(transform_list))
        elif opt.dataset == 'lsun':
            dataset = datasets.LSUN(db_path = opt.dataroot,
                classes = [opt.lsun_class + '_train'],
                transform = transforms.Compose(transform_list))

        def get_data(k):
            return dataset[k][0]

    data_index = torch.load(os.path.join(opt.dataroot, 'data_index.pt'))
    self.opt = opt
    self.get_data = get_data
    # Removed redundant local `train_index`; the attribute is set directly.
    self.train_index = data_index['train']
    self.counter = 0
def get_dataloader(opt):
    """Return a shuffled DataLoader for the dataset named by opt.dataset
    (folder-style, lsun, or cifar10)."""
    norm = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    if opt.dataset in ['imagenet', 'folder', 'lfw']:
        # folder dataset
        dataset = dset.ImageFolder(
            root=opt.dataroot,
            transform=transforms.Compose([
                transforms.Scale(opt.imageScaleSize),
                transforms.CenterCrop(opt.imageSize),
                transforms.ToTensor(),
                norm,
            ]))
    elif opt.dataset == 'lsun':
        dataset = dset.LSUN(
            db_path=opt.dataroot,
            classes=['bedroom_train'],
            transform=transforms.Compose([
                transforms.Scale(opt.imageScaleSize),
                transforms.CenterCrop(opt.imageSize),
                transforms.ToTensor(),
                norm,
            ]))
    elif opt.dataset == 'cifar10':
        # CIFAR images are already square; only a scale is needed.
        dataset = dset.CIFAR10(
            root=opt.dataroot,
            download=True,
            transform=transforms.Compose([
                transforms.Scale(opt.imageSize),
                transforms.ToTensor(),
                norm,
            ]))
    assert dataset
    return torch.utils.data.DataLoader(dataset,
                                       batch_size=opt.batch_size,
                                       shuffle=True,
                                       num_workers=int(opt.workers))
def Imagenet_LMDB_generate(imagenet_dir, output_dir, make_val=False, make_train=False):
    """Convert raw ImageNet JPEG folders into LMDB datasets.

    `imagenet_dir` must contain 'train' and/or 'val' subdirectories, each
    holding the 1000 class folders of raw JPEG photos.

    :param imagenet_dir: root containing 'train'/'val' image folders
    :param output_dir: where the LMDB databases are written
    :param make_val: build the validation LMDB (center-cropped to 224)
    :param make_train: build the training LMDB (scale only, shuffled)
    """
    train_name = 'imagenet_train_lmdb'
    val_name = 'imagenet_val_lmdb'

    def target_trans(target):
        return target

    def to_uint8(tensor):
        # ToTensor() yields floats in [0, 1]; store compact uint8 bytes.
        return (tensor.numpy() * 255).astype(np.uint8)

    if make_val:
        val_lmdb = lmdb_datasets.LMDB_generator(osp.join(output_dir, val_name))

        # Parameter renamed from `dir`, which shadowed the builtin.
        def trans_val_data(img):
            tensor = transforms.Compose([
                transforms.Scale(256),
                transforms.CenterCrop(224),
                transforms.ToTensor()
            ])(img)
            return to_uint8(tensor)

        val = datasets.ImageFolder(osp.join(imagenet_dir, 'val'),
                                   trans_val_data, target_trans)
        val_lmdb.write_classification_lmdb(val, num_per_dataset=DATASET_SIZE)
    if make_train:
        train_lmdb = lmdb_datasets.LMDB_generator(osp.join(output_dir, train_name))

        def trans_train_data(img):
            tensor = transforms.Compose([
                transforms.Scale(256),
                transforms.ToTensor()
            ])(img)
            return to_uint8(tensor)

        train = datasets.ImageFolder(osp.join(imagenet_dir, 'train'),
                                     trans_train_data, target_trans)
        # Shuffle sample order before writing.
        train.imgs = np.random.permutation(train.imgs)
        train_lmdb.write_classification_lmdb(train, num_per_dataset=DATASET_SIZE,
                                             write_shape=True)