我们从 Python 开源项目中提取了以下 50 个代码示例,用于说明如何使用 torchvision.transforms.Normalize()。
def load_labels(data_dir, resize=(224, 224)):
    """Return the class names found under ``<data_dir>/train`` via ImageFolder.

    The transform is only needed because ImageFolder requires one; only the
    ``classes`` attribute of the resulting dataset is used.
    """
    augment = transforms.Compose([
        transforms.RandomSizedCrop(max(resize)),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ])
    data_transforms = {'train': augment}
    dsets = {split: datasets.ImageFolder(os.path.join(data_dir, 'train'),
                                         data_transforms[split])
             for split in ['train']}
    return dsets['train'].classes
def get_transform(opt):
    """Build the image preprocessing pipeline selected by ``opt.resize_or_crop``.

    All variants end with ToTensor + [-1, 1] normalization; training runs may
    additionally get a random horizontal flip.
    """
    steps = []
    mode = opt.resize_or_crop
    if mode == 'resize_and_crop':
        steps.append(transforms.Scale([opt.loadSizeX, opt.loadSizeY], Image.BICUBIC))
        steps.append(transforms.RandomCrop(opt.fineSize))
    elif mode == 'crop':
        steps.append(transforms.RandomCrop(opt.fineSize))
    elif mode == 'scale_width':
        steps.append(transforms.Lambda(
            lambda img: __scale_width(img, opt.fineSize)))
    elif mode == 'scale_width_and_crop':
        steps.append(transforms.Lambda(
            lambda img: __scale_width(img, opt.loadSizeX)))
        steps.append(transforms.RandomCrop(opt.fineSize))
    if opt.isTrain and not opt.no_flip:
        steps.append(transforms.RandomHorizontalFlip())
    steps.extend([transforms.ToTensor(),
                  transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
    return transforms.Compose(steps)
def get_loader(config):
    """Builds and returns DataLoaders for the SVHN and MNIST datasets."""
    pipeline = transforms.Compose([
        transforms.Scale(config.image_size),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
    svhn_set = datasets.SVHN(root=config.svhn_path, download=True, transform=pipeline)
    mnist_set = datasets.MNIST(root=config.mnist_path, download=True, transform=pipeline)

    def make_loader(ds):
        # Both loaders share batch size / shuffling / worker settings.
        return torch.utils.data.DataLoader(dataset=ds,
                                           batch_size=config.batch_size,
                                           shuffle=True,
                                           num_workers=config.num_workers)

    return make_loader(svhn_set), make_loader(mnist_set)
def get_mnist(train):
    """Get MNIST dataset loader for the train (True) or test (False) split."""
    # Normalization statistics come from the shared params module.
    preprocess = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=params.dataset_mean,
                             std=params.dataset_std),
    ])
    dataset = datasets.MNIST(root=params.data_root,
                             train=train,
                             transform=preprocess,
                             download=True)
    return torch.utils.data.DataLoader(dataset=dataset,
                                       batch_size=params.batch_size,
                                       shuffle=True)
def dataLoader(is_train=True, cuda=True, batch_size=64, shuffle=True):
    """Return (loader, size) for CIFAR-100.

    Args:
        is_train: when True, return the augmented training split; otherwise
            the plain test split.
        cuda: unused here (kept for interface compatibility).
        batch_size: mini-batch size for the DataLoader.
        shuffle: whether the DataLoader shuffles each epoch.

    Returns:
        A (torch.utils.data.DataLoader, int) pair where the int is the number
        of samples in the chosen split.
    """
    # CIFAR-100 channel statistics, given in 0-255 units in the original.
    mean = [n / 255. for n in [129.3, 124.1, 112.4]]
    std = [n / 255. for n in [68.2, 65.4, 70.4]]
    if is_train:
        trans = transforms.Compose([
            transforms.RandomHorizontalFlip(),
            transforms.RandomCrop(32, padding=4),
            transforms.ToTensor(),
            transforms.Normalize(mean=mean, std=std),
        ])
        dataset = td.CIFAR100('data', train=True, transform=trans)
    else:
        trans = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=mean, std=std),
        ])
        dataset = td.CIFAR100('data', train=False, transform=trans)
    # FIX: len(dataset) replaces the deprecated (and since removed)
    # .train_labels / .test_labels attributes.
    size = len(dataset)
    loader = torch.utils.data.DataLoader(dataset,
                                         batch_size=batch_size,
                                         shuffle=shuffle)
    return loader, size
def mnist(self):
    """Return (train_loader, test_loader) over MNIST with standard normalization."""
    mean, std = [0.1307], [0.3081]
    to_normalized_tensor = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean, std),
    ])
    root = "/home/dataset/mnist"
    train_loader = torch.utils.data.DataLoader(
        dsets.MNIST(root, train=True, download=True,
                    transform=to_normalized_tensor),
        batch_size=self.train_batch_size,
        shuffle=True,
        num_workers=self.n_threads,
        pin_memory=False)
    test_loader = torch.utils.data.DataLoader(
        dsets.MNIST(root, train=False,
                    transform=to_normalized_tensor),
        batch_size=self.test_batch_size,
        shuffle=True,
        num_workers=self.n_threads,
        pin_memory=False)
    return train_loader, test_loader
def test_getitem(self):
    """Preprocessor over VIPeR should yield normalized 3x224x224 tensors."""
    import torchvision.transforms as t
    from reid.datasets.viper import VIPeR
    from reid.utils.data.preprocessor import Preprocessor
    root, split_id, num_val = '/tmp/open-reid/viper', 0, 100
    dataset = VIPeR(root, split_id=split_id, num_val=num_val, download=True)
    preproc = Preprocessor(dataset.train, root=dataset.images_dir,
                           transform=t.Compose([
                               t.Scale(256),
                               t.CenterCrop(224),
                               t.ToTensor(),
                               t.Normalize(mean=[0.485, 0.456, 0.406],
                                           std=[0.229, 0.224, 0.225])
                           ]))
    # FIX: assertEqual replaces the deprecated assertEquals alias.
    self.assertEqual(len(preproc), len(dataset.train))
    img, pid, camid = preproc[0]
    self.assertEqual(img.size(), (3, 224, 224))
def get_usps(train):
    """Get USPS dataset loader for the train (True) or test (False) split."""
    # Normalization statistics come from the shared params module.
    preprocess = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=params.dataset_mean,
                             std=params.dataset_std),
    ])
    dataset = USPS(root=params.data_root,
                   train=train,
                   transform=preprocess,
                   download=True)
    return torch.utils.data.DataLoader(dataset=dataset,
                                       batch_size=params.batch_size,
                                       shuffle=True)
def load_data(self):
    """Build shuffled DataLoaders over the synthetic and real image folders.

    Stores them on ``self.syn_train_loader`` / ``self.real_loader``.
    """
    print('=' * 50)
    print('Loading data...')
    # BUG FIX: `transforms.ImageOps.grayscale` does not exist (ImageOps is a
    # PIL module, not a torchvision transform) and raised AttributeError.
    # transforms.Grayscale is the supported equivalent; 3 output channels keep
    # the 3-channel Normalize below valid.
    transform = transforms.Compose([
        transforms.Grayscale(num_output_channels=3),
        transforms.Scale((cfg.img_width, cfg.img_height)),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
    syn_train_folder = torchvision.datasets.ImageFolder(root=cfg.syn_path,
                                                        transform=transform)
    # print(syn_train_folder)
    self.syn_train_loader = Data.DataLoader(syn_train_folder,
                                            batch_size=cfg.batch_size,
                                            shuffle=True, pin_memory=True)
    print('syn_train_batch %d' % len(self.syn_train_loader))
    real_folder = torchvision.datasets.ImageFolder(root=cfg.real_path,
                                                   transform=transform)
    # real_folder.imgs = real_folder.imgs[:2000]
    self.real_loader = Data.DataLoader(real_folder,
                                       batch_size=cfg.batch_size,
                                       shuffle=True, pin_memory=True)
    print('real_batch %d' % len(self.real_loader))
def train(self):
    # Spatial (RGB frame) training set: random 224x224 crops plus horizontal
    # flips, normalized with the standard ImageNet channel statistics.
    training_set = spatial_dataset(dic=self.dic_training, root_dir=self.data_path, mode='train', transform = transforms.Compose([
        transforms.RandomCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225])
        ]))
    # NOTE(review): Python 2 print statements — this module predates Python 3.
    print '==> Training data :',len(training_set),'frames'
    # Sanity check: print the tensor size of the first sample's 'img1' entry.
    print training_set[1][0]['img1'].size()
    train_loader = DataLoader(
        dataset=training_set,
        batch_size=self.BATCH_SIZE,
        shuffle=True,
        num_workers=self.num_workers)
    return train_loader
def validate(self):
    # Validation split: deterministic 224x224 resize (no augmentation) with
    # ImageNet normalization statistics.
    validation_set = spatial_dataset(dic=self.dic_testing, root_dir=self.data_path, mode='val', transform = transforms.Compose([
        transforms.Scale([224,224]),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225])
        ]))
    # NOTE(review): Python 2 print statements — this module predates Python 3.
    print '==> Validation data :',len(validation_set),'frames'
    print validation_set[1][1].size()
    # Shuffling is disabled for the validation pass.
    val_loader = DataLoader(
        dataset=validation_set,
        batch_size=self.BATCH_SIZE,
        shuffle=False,
        num_workers=self.num_workers)
    return val_loader
def init_learning(self, model, criterion):
    """Install default train/val transforms (when unset) and reset best_score.

    Normalization statistics are taken from the model itself.
    """
    normalize = transforms.Normalize(mean=model.image_normalization_mean,
                                     std=model.image_normalization_std)
    if self._state('train_transform') is None:
        self.state['train_transform'] = transforms.Compose([
            Warp(self.state['image_size']),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])
    if self._state('val_transform') is None:
        self.state['val_transform'] = transforms.Compose([
            Warp(self.state['image_size']),
            transforms.ToTensor(),
            normalize,
        ])
    self.state['best_score'] = 0
def train(rank, args, model):
    """Worker entry point: seed per-rank, then train and evaluate each epoch."""
    torch.manual_seed(args.seed + rank)
    mnist_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])
    train_loader = torch.utils.data.DataLoader(
        datasets.MNIST('../data', train=True, download=True,
                       transform=mnist_transform),
        batch_size=args.batch_size, shuffle=True, num_workers=1)
    test_loader = torch.utils.data.DataLoader(
        datasets.MNIST('../data', train=False, transform=mnist_transform),
        batch_size=args.batch_size, shuffle=True, num_workers=1)
    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
    for epoch in range(1, args.epochs + 1):
        train_epoch(epoch, args, model, train_loader, optimizer)
        test_epoch(model, test_loader)
def imagenet():
    """Return transforms and metadata for ImageNet.

    Training uses TransformTwice (two augmented views per image); evaluation
    uses the standard resize-256 / center-crop-224 pipeline.
    """
    channel_stats = dict(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225])
    normalize = transforms.Normalize(**channel_stats)
    train_transformation = data.TransformTwice(transforms.Compose([
        transforms.RandomRotation(10),
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ColorJitter(brightness=0.4, contrast=0.4,
                               saturation=0.4, hue=0.1),
        transforms.ToTensor(),
        normalize,
    ]))
    eval_transformation = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        normalize,
    ])
    return {
        'train_transformation': train_transformation,
        'eval_transformation': eval_transformation,
        'datadir': 'data-local/images/ilsvrc2012/',
        'num_classes': 1000,
    }
def cifar10():
    """Return transforms and metadata for CIFAR-10 (TransformTwice for training)."""
    channel_stats = dict(mean=[0.4914, 0.4822, 0.4465],
                         std=[0.2470, 0.2435, 0.2616])
    normalize = transforms.Normalize(**channel_stats)
    train_transformation = data.TransformTwice(transforms.Compose([
        data.RandomTranslateWithReflect(4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ]))
    eval_transformation = transforms.Compose([
        transforms.ToTensor(),
        normalize,
    ])
    return {
        'train_transformation': train_transformation,
        'eval_transformation': eval_transformation,
        'datadir': 'data-local/images/cifar/cifar10/by-image',
        'num_classes': 10,
    }
def imagenet_transform(scale_size=256, input_size=224, train=True, allow_var_size=False):
    """ImageNet preprocessing.

    Training: scale, random crop, random flip.  Evaluation: scale plus center
    crop, or scale only when variable-size outputs are allowed.
    """
    normalize = {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225]}
    tail = [transforms.ToTensor(), transforms.Normalize(**normalize)]
    if train:
        head = [transforms.Scale(scale_size),
                transforms.RandomCrop(input_size),
                transforms.RandomHorizontalFlip()]
    elif allow_var_size:
        head = [transforms.Scale(scale_size)]
    else:
        head = [transforms.Scale(scale_size),
                transforms.CenterCrop(input_size)]
    return transforms.Compose(head + tail)
def get_transform(opt):
    """Build the preprocessing pipeline selected by ``opt.resize_or_crop``."""
    pipeline = []
    choice = opt.resize_or_crop
    if choice == 'resize_and_crop':
        pipeline.append(transforms.Scale([opt.loadSize, opt.loadSize], Image.BICUBIC))
        pipeline.append(transforms.RandomCrop(opt.fineSize))
    elif choice == 'crop':
        pipeline.append(transforms.RandomCrop(opt.fineSize))
    elif choice == 'scale_width':
        pipeline.append(transforms.Lambda(
            lambda img: __scale_width(img, opt.fineSize)))
    elif choice == 'scale_width_and_crop':
        pipeline.append(transforms.Lambda(
            lambda img: __scale_width(img, opt.loadSize)))
        pipeline.append(transforms.RandomCrop(opt.fineSize))
    if opt.isTrain and not opt.no_flip:
        pipeline.append(transforms.RandomHorizontalFlip())
    pipeline.extend([transforms.ToTensor(),
                     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
    return transforms.Compose(pipeline)
def load_image_for_prediction(opt, image_path):
    """
    Load an image for prediction, preprocessing it the same way as training,
    and return the dict layout the model expects ('A'/'B' tensors plus paths).
    """
    pipeline = transforms.Compose([
        transforms.Scale(opt.loadSize),
        transforms.RandomCrop(opt.fineSize),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])
    tensor = pipeline(Image.open(image_path)).float()
    tensor.unsqueeze_(0)  # add the batch dimension in place
    return {'A': tensor, 'A_paths': image_path,
            'B': tensor, 'B_paths': image_path}
def get_loaders(loader_batchsize, **kwargs):
    """Return (train_loader, test_loader) for the dataset named in kwargs['arguments']."""
    arguments = kwargs['arguments']
    data = arguments.data
    if data == 'mnist':
        # NOTE(review): only 'mnist' is handled; any other value reaches the
        # return with the loaders unbound (NameError).
        loader_kwargs = {'num_workers': 1, 'pin_memory': True} if arguments.cuda else {}
        train_loader = torch.utils.data.DataLoader(
            datasets.MNIST('../data', train=True, download=True,
                           transform=transforms.Compose([
                               transforms.ToTensor(),
                               # transforms.Normalize((0,), (1,))
                           ])),
            batch_size=loader_batchsize, shuffle=True, **loader_kwargs)
        test_loader = torch.utils.data.DataLoader(
            datasets.MNIST('../data', train=False,
                           transform=transforms.Compose([
                               transforms.ToTensor(),
                               # transforms.Normalize((7,), (0.3081,))
                           ])),
            batch_size=loader_batchsize, shuffle=True, **loader_kwargs)
    return train_loader, test_loader
def LSUN_loader(root, image_size, classes=['bedroom'], normalize=True):
    """
    Function to load torchvision LSUN dataset object based on just image size

    Args:
        root = If your dataset is downloaded and ready to use, mention the
               location of this folder. Else, the dataset will be downloaded
               to this location
        image_size = Size of every image
        classes = Default class is 'bedroom'. Other available classes are:
                  'bridge', 'church_outdoor', 'classroom', 'conference_room',
                  'dining_room', 'kitchen', 'living_room', 'restaurant', 'tower'
        normalize = Requirement to normalize the image. Default is true
    """
    transformations = [transforms.Scale(image_size),
                       transforms.CenterCrop(image_size),
                       transforms.ToTensor()]
    if normalize == True:
        transformations.append(
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)))
    # BUG FIX: the original loop rebound the loop variable (`c = c + '_train'`)
    # without updating `classes`, then rebuilt the dataset once per class with
    # the unsuffixed names (which LSUN rejects).  Build the '_train'-suffixed
    # list once and construct a single dataset.
    train_classes = [c + '_train' for c in classes]
    lsun_data = dset.LSUN(db_path=root, classes=train_classes,
                          transform=transforms.Compose(transformations))
    return lsun_data
def test():
    """Smoke-test ListDataset: pull one batch, print shapes, save it as a grid."""
    import torchvision
    pipeline = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.485,0.456,0.406), (0.229,0.224,0.225))
    ])
    dataset = ListDataset(root='/mnt/hgfs/D/download/PASCAL_VOC/voc_all_images',
                          list_file='./data/voc12_train.txt',
                          train=True, transform=pipeline, input_size=600)
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=8,
                                             shuffle=False, num_workers=1,
                                             collate_fn=dataset.collate_fn)
    for images, loc_targets, cls_targets in dataloader:
        print(images.size())
        print(loc_targets.size())
        print(cls_targets.size())
        grid = torchvision.utils.make_grid(images, 1)
        torchvision.utils.save_image(grid, 'a.jpg')
        break  # one batch is enough for a smoke test

# test()
def get_transform(resize_crop='resize_and_crop', flip=True, loadSize=286, fineSize=256):
    """Build the preprocessing pipeline for the given resize/crop strategy."""
    ops = []
    if resize_crop == 'resize_and_crop':
        ops.append(transforms.Resize([loadSize, loadSize], Image.BICUBIC))
        ops.append(transforms.RandomCrop(fineSize))
    elif resize_crop == 'crop':
        ops.append(transforms.RandomCrop(fineSize))
    elif resize_crop == 'scale_width':
        ops.append(transforms.Lambda(lambda img: __scale_width(img, fineSize)))
    elif resize_crop == 'scale_width_and_crop':
        ops.append(transforms.Lambda(lambda img: __scale_width(img, loadSize)))
        ops.append(transforms.RandomCrop(fineSize))
    if flip:
        ops.append(transforms.RandomHorizontalFlip())
    ops.extend([transforms.ToTensor(),
                transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
    return transforms.Compose(ops)
def transform(is_train=True, normalize=True):
    """
    Returns a transform object: scale to 256, then random crop + flip for
    training or a center crop for evaluation, with optional normalization.
    """
    ops = [Scale(256)]
    ops.append(RandomCrop(224) if is_train else CenterCrop(224))
    if is_train:
        ops.append(RandomHorizontalFlip())
    ops.append(ToTensor())
    if normalize:
        ops.append(Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]))
    return Compose(ops)
def __init__(self, crop_size = 128, y_offset = 15, flip = False):
    """Store crop parameters and build the post-crop 224px pipeline."""
    self.crop_size = crop_size
    self.y_offset = y_offset
    self.flip = flip
    tail = [
        transforms.Scale(size = 224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ]
    if self.flip:
        # Horizontal flip happens before scaling, matching the flip=True path.
        tail.insert(0, transforms.RandomHorizontalFlip())
    self.post_transform = transforms.Compose(tail)
def load_data(resize):
    """Return (train_loader, val_loader) over the PlantVillage ImageFolder splits.

    Args:
        resize: target (H, W); ``max(resize)`` sets the crop size.

    Relies on a module-level ``batch_size``.  The unused ``dset_sizes`` and
    ``dset_classes`` locals from the original were removed.
    """
    imagenet_norm = transforms.Normalize([0.485, 0.456, 0.406],
                                         [0.229, 0.224, 0.225])
    data_transforms = {
        'train': transforms.Compose([
            transforms.RandomSizedCrop(max(resize)),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            imagenet_norm,
        ]),
        'val': transforms.Compose([
            # Higher scale-up for inception
            transforms.Scale(int(max(resize)/224*256)),
            transforms.CenterCrop(max(resize)),
            transforms.ToTensor(),
            imagenet_norm,
        ]),
    }
    data_dir = 'PlantVillage'
    dsets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x])
             for x in ['train', 'val']}
    dset_loaders = {x: torch.utils.data.DataLoader(dsets[x],
                                                   batch_size=batch_size,
                                                   shuffle=True)
                    for x in ['train', 'val']}
    return dset_loaders['train'], dset_loaders['val']
def scale_crop(input_size, scale_size=None, normalize=__imagenet_stats):
    """Center-crop evaluation transform, pre-scaled when scale_size differs."""
    steps = [
        transforms.CenterCrop(input_size),
        transforms.ToTensor(),
        transforms.Normalize(**normalize),
    ]
    if scale_size != input_size:
        steps.insert(0, transforms.Scale(scale_size))
    return transforms.Compose(steps)
def scale_random_crop(input_size, scale_size=None, normalize=__imagenet_stats):
    """Random-crop training transform, pre-scaled when scale_size differs.

    BUG FIX: the original built the Compose but never returned it, so the
    function always returned None.
    """
    t_list = [
        transforms.RandomCrop(input_size),
        transforms.ToTensor(),
        transforms.Normalize(**normalize),
    ]
    if scale_size != input_size:
        t_list = [transforms.Scale(scale_size)] + t_list
    return transforms.Compose(t_list)
def pad_random_crop(input_size, scale_size=None, normalize=__imagenet_stats):
    """Pad by (scale_size - input_size)/2, then random-crop and flip."""
    pad = int((scale_size - input_size) / 2)
    steps = [
        transforms.RandomCrop(input_size, padding=pad),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(**normalize),
    ]
    return transforms.Compose(steps)
def inception_preproccess(input_size, normalize=__imagenet_stats):
    """Inception-style training transform: random sized crop plus flip."""
    steps = [
        transforms.RandomSizedCrop(input_size),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(**normalize),
    ]
    return transforms.Compose(steps)
def CreateDataLoader(opt):
    """Seed the RNG and build the paired ImageFolder DataLoader.

    The V branch uses a quarter-size random crop; all branches normalize
    to [-1, 1].
    """
    random.seed(opt.manualSeed)
    # folder dataset
    norm_half = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    CTrans = transforms.Compose([
        transforms.Scale(opt.imageSize, Image.BICUBIC),
        transforms.ToTensor(),
        norm_half,
    ])
    VTrans = transforms.Compose([
        RandomSizedCrop(opt.imageSize // 4, Image.BICUBIC),
        transforms.ToTensor(),
        norm_half,
    ])
    STrans = transforms.Compose([
        transforms.Scale(opt.imageSize, Image.BICUBIC),
        transforms.ToTensor(),
        norm_half,
    ])
    dataset = ImageFolder(rootC=opt.datarootC,
                          rootS=opt.datarootS,
                          transform=CTrans,
                          vtransform=VTrans,
                          stransform=STrans)
    assert dataset
    return data.DataLoader(dataset, batch_size=opt.batchSize,
                           shuffle=True, num_workers=int(opt.workers),
                           drop_last=True)
def CreateDataLoader(opt):
    """Seed the RNG and build the paired ImageFolder DataLoader.

    The V branch produces 224px crops with ImageNet normalization; the C and
    S branches normalize to [-1, 1].
    """
    random.seed(opt.manualSeed)
    # folder dataset
    norm_half = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    CTrans = transforms.Compose([
        transforms.Scale(opt.imageSize, Image.BICUBIC),
        transforms.ToTensor(),
        norm_half,
    ])
    VTrans = transforms.Compose([
        RandomSizedCrop(224, Image.BICUBIC),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])
    STrans = transforms.Compose([
        transforms.Scale(opt.imageSize, Image.BICUBIC),
        transforms.ToTensor(),
        norm_half,
    ])
    dataset = ImageFolder(rootC=opt.datarootC,
                          rootS=opt.datarootS,
                          transform=CTrans,
                          vtransform=VTrans,
                          stransform=STrans)
    assert dataset
    return data.DataLoader(dataset, batch_size=opt.batchSize,
                           shuffle=True, num_workers=int(opt.workers),
                           drop_last=True)
def CreateDataLoader(opt):
    """Seed the RNG and build the paired ImageFolder DataLoader.

    The V branch random-crops at the full image size; all branches normalize
    to [-1, 1].
    """
    random.seed(opt.manualSeed)
    # folder dataset
    norm_half = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    CTrans = transforms.Compose([
        transforms.Scale(opt.imageSize, Image.BICUBIC),
        transforms.ToTensor(),
        norm_half,
    ])
    VTrans = transforms.Compose([
        RandomSizedCrop(opt.imageSize, Image.BICUBIC),
        transforms.ToTensor(),
        norm_half,
    ])
    STrans = transforms.Compose([
        transforms.Scale(opt.imageSize, Image.BICUBIC),
        transforms.ToTensor(),
        norm_half,
    ])
    dataset = ImageFolder(rootC=opt.datarootC,
                          rootS=opt.datarootS,
                          transform=CTrans,
                          vtransform=VTrans,
                          stransform=STrans)
    assert dataset
    return data.DataLoader(dataset, batch_size=opt.batchSize,
                           shuffle=True, num_workers=int(opt.workers),
                           drop_last=True)
def CreateDataLoader(opt):
    """Seed the RNG and build the paired ImageFolder DataLoader.

    The S branch applies a random brightness jitter before normalization.
    """
    random.seed(opt.manualSeed)
    # folder dataset
    norm_half = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    CTrans = transforms.Compose([
        transforms.Scale(opt.imageSize, Image.BICUBIC),
        transforms.ToTensor(),
        norm_half,
    ])
    VTrans = transforms.Compose([
        RandomSizedCrop(opt.imageSize // 4, Image.BICUBIC),
        transforms.ToTensor(),
        norm_half,
    ])

    def jitter(x):
        # x*r + (1 - r) with r in [0.7, 1]: pulls values in [0, 1] toward 1.
        ran = random.uniform(0.7, 1)
        return x * ran + 1 - ran

    STrans = transforms.Compose([
        transforms.Scale(opt.imageSize, Image.BICUBIC),
        transforms.ToTensor(),
        transforms.Lambda(jitter),
        norm_half,
    ])
    dataset = ImageFolder(rootC=opt.datarootC,
                          rootS=opt.datarootS,
                          transform=CTrans,
                          vtransform=VTrans,
                          stransform=STrans)
    assert dataset
    return data.DataLoader(dataset, batch_size=opt.batchSize,
                           shuffle=True, num_workers=int(opt.workers),
                           drop_last=True)
def get_loader(config):
    """Builds and returns train/test DataLoaders for both SVHN and MNIST."""
    pipeline = transforms.Compose([
        transforms.Scale(config.image_size),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
    svhn = datasets.SVHN(root=config.svhn_path, download=True,
                         transform=pipeline, split='train')
    mnist = datasets.MNIST(root=config.mnist_path, download=True,
                           transform=pipeline, train=True)
    svhn_test = datasets.SVHN(root=config.svhn_path, download=True,
                              transform=pipeline, split='test')
    mnist_test = datasets.MNIST(root=config.mnist_path, download=True,
                                transform=pipeline, train=False)

    def make_loader(ds, shuffle):
        # Train splits shuffle; test splits keep a fixed order.
        return torch.utils.data.DataLoader(dataset=ds,
                                           batch_size=config.batch_size,
                                           shuffle=shuffle,
                                           num_workers=config.num_workers)

    return (make_loader(svhn, True),
            make_loader(mnist, True),
            make_loader(svhn_test, False),
            make_loader(mnist_test, False))
def initialize(self, opt):
    """Create the A and B ImageFolder loaders and wrap them in PairedData."""
    BaseDataLoader.initialize(self, opt)
    shared_transform = transforms.Compose([
        transforms.Scale(opt.loadSize),
        transforms.RandomCrop(opt.fineSize),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])

    def build(side):
        # One ImageFolder + DataLoader per domain ('A' or 'B').
        folder = ImageFolder(root=opt.dataroot + '/' + opt.phase + side,
                             transform=shared_transform, return_paths=True)
        loader = torch.utils.data.DataLoader(
            folder,
            batch_size=self.opt.batchSize,
            shuffle=not self.opt.serial_batches,
            num_workers=int(self.opt.nThreads))
        return folder, loader

    dataset_A, data_loader_A = build('A')
    dataset_B, data_loader_B = build('B')
    self.dataset_A = dataset_A
    self.dataset_B = dataset_B
    flip = opt.isTrain and not opt.no_flip
    self.paired_data = PairedData(data_loader_A, data_loader_B,
                                  self.opt.max_dataset_size, flip)
def initialize(self, opt):
    """Create a single ImageFolder loader and wrap it in PairedData."""
    BaseDataLoader.initialize(self, opt)
    self.fineSize = opt.fineSize
    pipeline = transforms.Compose([
        # TODO: Scale
        transforms.Scale(opt.loadSize),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])
    # Dataset A
    dataset = ImageFolder(root=opt.dataroot + '/' + opt.phase,
                          transform=pipeline, return_paths=True)
    data_loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=self.opt.batchSize,
        shuffle=not self.opt.serial_batches,
        num_workers=int(self.opt.nThreads))
    self.dataset = dataset
    flip = opt.isTrain and not opt.no_flip
    self.paired_data = PairedData(data_loader, opt.fineSize,
                                  opt.max_dataset_size, flip)
def initialize(self, opt):
    """Index the aligned AB image directory and prepare the tensor transform."""
    self.opt = opt
    self.root = opt.dataroot
    self.dir_AB = os.path.join(opt.dataroot, opt.phase)
    self.AB_paths = sorted(make_dataset(self.dir_AB))
    # assert(opt.resize_or_crop == 'resize_and_crop')
    self.transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])
def main():
    # Batch sizes for the forward passes below (no training happens here).
    training_batch_size = 352
    validation_batch_size = 352
    # Restore a ResNet-152 snapshot and switch it to inference mode on the GPU.
    net = get_res152(num_classes=num_classes, snapshot_path=os.path.join(
        ckpt_path, 'epoch_15_validation_loss_0.0772_iter_1000.pth')).cuda()
    net.eval()
    # NOTE(review): dataset-specific channel statistics — confirm they match
    # the values used when the snapshot was trained.
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.311, 0.340, 0.299], [0.167, 0.144, 0.138])
    ])
    criterion = nn.MultiLabelSoftMarginLoss().cuda()
    # Predict over the training split to fit prediction thresholds.
    train_set = MultipleClassImageFolder(split_train_dir, transform)
    train_loader = DataLoader(train_set, batch_size=training_batch_size, num_workers=16)
    batch_outputs, batch_labels = predict(net, train_loader)
    loss = criterion(batch_outputs, batch_labels)
    # NOTE(review): Python 2 print statements — this module predates Python 3.
    print 'training loss %.4f' % loss.cpu().data.numpy()[0]
    batch_outputs = batch_outputs.cpu().data.numpy()
    batch_labels = batch_labels.cpu().data.numpy()
    thretholds = find_best_threthold(batch_outputs, batch_labels)
    # Re-run prediction on the validation split and score it with those thresholds.
    val_set = MultipleClassImageFolder(split_val_dir, transform)
    val_loader = DataLoader(val_set, batch_size=validation_batch_size, num_workers=16)
    batch_outputs, batch_labels = predict(net, val_loader)
    loss = criterion(batch_outputs, batch_labels)
    print 'validation loss %.4f' % loss.cpu().data.numpy()[0]
    batch_outputs = batch_outputs.cpu().data.numpy()
    batch_labels = batch_labels.cpu().data.numpy()
    # Persist raw outputs/labels for offline analysis.
    sio.savemat('./val_output.mat', {'outputs': batch_outputs, 'labels': batch_labels})
    prediction = get_one_hot_prediction(batch_outputs, thretholds)
    evaluation = evaluate(prediction, batch_labels)
    print 'validation evaluation: accuracy %.4f, precision %.4f, recall %.4f, f2 %.4f' % (
        evaluation[0], evaluation[1], evaluation[2], evaluation[3])
def toTensor(self, img):
    """Read the image at path ``img`` and encode it as a mean-subtracted BGR
    tensor scaled to [0, 255]."""
    pipeline = transforms.Compose([
        transforms.Scale(self.img_size),
        transforms.ToTensor(),
        # Reorder channels RGB -> BGR.
        transforms.Lambda(lambda x: x[torch.LongTensor([2,1,0])]),
        # NOTE(review): presumably pretrained-VGG channel means — confirm.
        transforms.Normalize(mean=[0.40760392, 0.45795686, 0.48501961],
                             std=[1,1,1]),
        # Scale from [0, 1] to [0, 255] in place.
        transforms.Lambda(lambda x: x.mul_(255)),
    ])
    return pipeline(Image.open(img))
def tensor2img(self, tensor):
    """Invert toTensor's encoding (rescale, re-add the mean, BGR -> RGB) and
    save the result as ``<img_path>/result.jpg``."""
    decode = transforms.Compose([
        transforms.Lambda(lambda x: x.mul_(1./255)),
        transforms.Normalize(mean=[-0.40760392, -0.45795686, -0.48501961],
                             std=[1,1,1]),
        transforms.Lambda(lambda x: x[torch.LongTensor([2,1,0])]),
    ])
    decoded = decode(tensor)
    to_pil = transforms.Compose([transforms.ToPILImage()])
    img = to_pil(decoded.clamp_(0, 1))
    img.save(self.img_path + "/result.jpg")
def preprocess_torch(image_path, size):
    # PIL reads image in RGB format
    """Open an image, resize it, normalize with MEAN/STD, and return a
    non-grad 1xCxHxW Variable."""
    pipeline = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=MEAN, std=STD),
    ])
    img = Image.open(image_path)
    img = img.resize(size)
    batched = Variable(pipeline(img), requires_grad=False)
    batched = batched.unsqueeze(0)
    return batched
def get_cifar10_loaders(root_directory, train_batch_size=128, test_batch_size=100, download=False):
    """Return (trainloader, testloader) for CIFAR-10.

    Data preparation borrowed from
    https://github.com/kuangliu/pytorch-cifar/blob/master/main.py
    """
    stats = ((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(*stats),
    ])
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(*stats),
    ])
    data_root = os.path.join(root_directory, 'data')
    trainset = torchvision.datasets.CIFAR10(root=data_root, train=True,
                                            download=download,
                                            transform=transform_train)
    trainloader = torch.utils.data.DataLoader(trainset,
                                              batch_size=train_batch_size,
                                              shuffle=True, num_workers=2)
    testset = torchvision.datasets.CIFAR10(root=data_root, train=False,
                                           download=download,
                                           transform=transform_test)
    testloader = torch.utils.data.DataLoader(testset,
                                             batch_size=test_batch_size,
                                             shuffle=False, num_workers=2)
    return trainloader, testloader
def get_loader(image_path, image_size, batch_size, num_workers=2):
    """Builds and returns a shuffled DataLoader over an image folder."""
    pipeline = transforms.Compose([
        transforms.Scale(image_size),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
    dataset = ImageFolder(image_path, pipeline)
    return data.DataLoader(dataset=dataset,
                           batch_size=batch_size,
                           shuffle=True,
                           num_workers=num_workers)