我们从Python开源项目中,提取了以下50个代码示例,用于说明如何使用torchvision.transforms.ToTensor()。
def stylize(args):
    """Stylize a content image with a trained TransformerNet and save the result."""
    content_image = utils.load_image(args.content_image, scale=args.content_scale)
    # ToTensor gives [0, 1]; the style network expects [0, 255] pixel values.
    content_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Lambda(lambda x: x.mul(255))
    ])
    content_image = content_transform(content_image)
    content_image = content_image.unsqueeze(0)  # add batch dimension
    if args.cuda:
        content_image = content_image.cuda()
    # volatile=True: inference-only graph (legacy pre-0.4 PyTorch API).
    content_image = Variable(content_image, volatile=True)
    style_model = TransformerNet()
    style_model.load_state_dict(torch.load(args.model))
    if args.cuda:
        style_model.cuda()
    output = style_model(content_image)
    if args.cuda:
        output = output.cpu()
    output_data = output.data[0]  # drop the batch dimension before saving
    utils.save_image(args.output_image, output_data)
def load_labels(data_dir, resize=(224, 224)):
    """Return the class names that ImageFolder discovers under <data_dir>/train."""
    side = max(resize)
    train_transform = transforms.Compose([
        transforms.RandomSizedCrop(side),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        # ImageNet channel statistics.
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ])
    data_transforms = {'train': train_transform}
    dsets = {
        split: datasets.ImageFolder(os.path.join(data_dir, 'train'),
                                    data_transforms[split])
        for split in ['train']
    }
    return dsets['train'].classes
def get_transform(opt):
    """Build the image preprocessing pipeline selected by opt.resize_or_crop."""
    mode = opt.resize_or_crop
    steps = []
    if mode == 'resize_and_crop':
        steps.append(transforms.Scale([opt.loadSizeX, opt.loadSizeY], Image.BICUBIC))
        steps.append(transforms.RandomCrop(opt.fineSize))
    elif mode == 'crop':
        steps.append(transforms.RandomCrop(opt.fineSize))
    elif mode == 'scale_width':
        steps.append(transforms.Lambda(lambda img: __scale_width(img, opt.fineSize)))
    elif mode == 'scale_width_and_crop':
        steps.append(transforms.Lambda(lambda img: __scale_width(img, opt.loadSizeX)))
        steps.append(transforms.RandomCrop(opt.fineSize))
    if opt.isTrain and not opt.no_flip:
        steps.append(transforms.RandomHorizontalFlip())
    # Tensor conversion, then map [0, 1] to [-1, 1].
    steps.append(transforms.ToTensor())
    steps.append(transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)))
    return transforms.Compose(steps)
def get_loader(config):
    """Builds and returns Dataloader for MNIST and SVHN dataset."""
    transform = transforms.Compose([
        transforms.Scale(config.image_size),
        transforms.ToTensor(),
        # Map [0, 1] tensors to [-1, 1].
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])
    svhn = datasets.SVHN(root=config.svhn_path, download=True, transform=transform)
    mnist = datasets.MNIST(root=config.mnist_path, download=True, transform=transform)
    svhn_loader, mnist_loader = (
        torch.utils.data.DataLoader(dataset=ds,
                                    batch_size=config.batch_size,
                                    shuffle=True,
                                    num_workers=config.num_workers)
        for ds in (svhn, mnist)
    )
    return svhn_loader, mnist_loader
def get_mnist(train):
    """Get MNIST dataset loader."""
    # Tensor conversion followed by dataset-level standardization.
    pre_process = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=params.dataset_mean, std=params.dataset_std),
    ])
    mnist_dataset = datasets.MNIST(root=params.data_root,
                                   train=train,
                                   transform=pre_process,
                                   download=True)
    return torch.utils.data.DataLoader(dataset=mnist_dataset,
                                       batch_size=params.batch_size,
                                       shuffle=True)
def dataLoader(is_train=True, cuda=True, batch_size=64, shuffle=True):
    """Build a CIFAR-100 DataLoader (train or test) and return it with its size."""
    # CIFAR-100 per-channel statistics, rescaled from [0, 255] to [0, 1].
    mean = [n / 255. for n in [129.3, 124.1, 112.4]]
    std = [n / 255. for n in [68.2, 65.4, 70.4]]
    if is_train:
        trans = transforms.Compose([
            transforms.RandomHorizontalFlip(),
            transforms.RandomCrop(32, padding=4),
            transforms.ToTensor(),
            transforms.Normalize(mean=mean, std=std),
        ])
        dataset = td.CIFAR100('data', train=True, transform=trans)
        size = len(dataset.train_labels)
    else:
        trans = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=mean, std=std),
        ])
        dataset = td.CIFAR100('data', train=False, transform=trans)
        size = len(dataset.test_labels)
    loader = torch.utils.data.DataLoader(dataset,
                                         batch_size=batch_size,
                                         shuffle=shuffle)
    return loader, size
def get_data_loader(dataset_name, batch_size=1, dataset_transforms=None,
                    is_training_set=True, shuffle=True):
    """Return a DataLoader for the named torchvision dataset, ToTensor first."""
    extra = dataset_transforms if dataset_transforms else []
    trans = transforms.Compose([transforms.ToTensor()] + extra)
    # Resolve the dataset class (e.g. "MNIST") dynamically from torchvision.
    dataset_cls = getattr(datasets, dataset_name)
    dataset = dataset_cls(root=DATA_DIR,
                          train=is_training_set,
                          transform=trans,
                          download=True)
    return DataLoader(dataset, batch_size=batch_size, shuffle=shuffle)
def mnist(self):
    """Return (train_loader, test_loader) for MNIST with standard normalization."""
    # Canonical single-channel MNIST mean/std.
    norm_mean = [0.1307]
    norm_std = [0.3081]
    train_loader = torch.utils.data.DataLoader(
        dsets.MNIST("/home/dataset/mnist", train=True, download=True,
                    transform=transforms.Compose([
                        transforms.ToTensor(),
                        transforms.Normalize(norm_mean, norm_std)
                    ])),
        batch_size=self.train_batch_size, shuffle=True,
        num_workers=self.n_threads, pin_memory=False)
    test_loader = torch.utils.data.DataLoader(
        dsets.MNIST("/home/dataset/mnist", train=False,
                    transform=transforms.Compose([
                        transforms.ToTensor(),
                        transforms.Normalize(norm_mean, norm_std)
                    ])),
        batch_size=self.test_batch_size, shuffle=True,
        num_workers=self.n_threads, pin_memory=False)
    return train_loader, test_loader
def test_getitem(self):
    """Smoke-test Preprocessor.__getitem__ on the VIPeR training split."""
    import torchvision.transforms as t
    from reid.datasets.viper import VIPeR
    from reid.utils.data.preprocessor import Preprocessor
    root, split_id, num_val = '/tmp/open-reid/viper', 0, 100
    dataset = VIPeR(root, split_id=split_id, num_val=num_val, download=True)
    preproc = Preprocessor(dataset.train, root=dataset.images_dir,
                           transform=t.Compose([
                               t.Scale(256),
                               t.CenterCrop(224),
                               t.ToTensor(),
                               t.Normalize(mean=[0.485, 0.456, 0.406],
                                           std=[0.229, 0.224, 0.225])
                           ]))
    # The preprocessor wraps the split one-to-one.
    self.assertEquals(len(preproc), len(dataset.train))
    img, pid, camid = preproc[0]
    # CenterCrop(224) on an RGB image -> a (3, 224, 224) tensor.
    self.assertEquals(img.size(), (3, 224, 224))
def get_usps(train):
    """Get USPS dataset loader."""
    # Convert to tensor, then standardize with the configured statistics.
    pre_process = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=params.dataset_mean, std=params.dataset_std),
    ])
    usps_dataset = USPS(root=params.data_root,
                        train=train,
                        transform=pre_process,
                        download=True)
    usps_data_loader = torch.utils.data.DataLoader(dataset=usps_dataset,
                                                   batch_size=params.batch_size,
                                                   shuffle=True)
    return usps_data_loader
def load_data(self):
    """Build DataLoaders over the synthetic and real image folders.

    Populates self.syn_train_loader and self.real_loader.
    """
    print('=' * 50)
    print('Loading data...')
    # BUG FIX: the original pipeline used `transforms.ImageOps.grayscale`, but
    # torchvision.transforms has no `ImageOps` attribute (ImageOps lives in
    # PIL), so applying the transform raised AttributeError. Grayscale via
    # PIL's convert('L'), then back to 'RGB' so the 3-channel Normalize below
    # still receives a 3-channel tensor.
    transform = transforms.Compose([
        transforms.Lambda(lambda img: img.convert('L').convert('RGB')),
        transforms.Scale((cfg.img_width, cfg.img_height)),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
    syn_train_folder = torchvision.datasets.ImageFolder(root=cfg.syn_path,
                                                        transform=transform)
    self.syn_train_loader = Data.DataLoader(syn_train_folder,
                                            batch_size=cfg.batch_size,
                                            shuffle=True, pin_memory=True)
    print('syn_train_batch %d' % len(self.syn_train_loader))
    real_folder = torchvision.datasets.ImageFolder(root=cfg.real_path,
                                                   transform=transform)
    self.real_loader = Data.DataLoader(real_folder,
                                       batch_size=cfg.batch_size,
                                       shuffle=True, pin_memory=True)
    print('real_batch %d' % len(self.real_loader))
def train(self):
    """Return a shuffled DataLoader over the spatial (RGB-frame) training set."""
    training_set = spatial_dataset(dic=self.dic_training, root_dir=self.data_path,
                                   mode='train',
                                   transform=transforms.Compose([
                                       transforms.RandomCrop(224),
                                       transforms.RandomHorizontalFlip(),
                                       transforms.ToTensor(),
                                       # ImageNet statistics -- presumably a
                                       # pretrained backbone; TODO confirm.
                                       transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
                                   ]))
    # Python 2 print statements: this module targets Python 2.
    print '==> Training data :',len(training_set),'frames'
    print training_set[1][0]['img1'].size()
    train_loader = DataLoader(
        dataset=training_set,
        batch_size=self.BATCH_SIZE,
        shuffle=True,
        num_workers=self.num_workers)
    return train_loader
def validate(self):
    """Return an unshuffled DataLoader over the spatial validation set."""
    validation_set = spatial_dataset(dic=self.dic_testing, root_dir=self.data_path,
                                     mode='val',
                                     transform=transforms.Compose([
                                         # Deterministic resize (no random crop) for evaluation.
                                         transforms.Scale([224,224]),
                                         transforms.ToTensor(),
                                         transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
                                     ]))
    # Python 2 print statements: this module targets Python 2.
    print '==> Validation data :',len(validation_set),'frames'
    print validation_set[1][1].size()
    val_loader = DataLoader(
        dataset=validation_set,
        batch_size=self.BATCH_SIZE,
        shuffle=False,
        num_workers=self.num_workers)
    return val_loader
def train(self):
    """Return a shuffled DataLoader over the motion (optical-flow) training set."""
    training_set = motion_dataset(dic=self.dic_video_train, in_channel=self.in_channel,
                                  root_dir=self.data_path,
                                  mode='train',
                                  transform=transforms.Compose([
                                      # No normalization here: flow stacks are
                                      # consumed as raw [0, 1] tensors.
                                      transforms.Scale([224,224]),
                                      transforms.ToTensor(),
                                  ]))
    # Python 2 print statements: this module targets Python 2.
    print '==> Training data :',len(training_set),' videos',training_set[1][0].size()
    train_loader = DataLoader(
        dataset=training_set,
        batch_size=self.BATCH_SIZE,
        shuffle=True,
        num_workers=self.num_workers,
        pin_memory=True
        )
    return train_loader
def val(self):
    """Return an unshuffled DataLoader over the motion validation set."""
    validation_set = motion_dataset(dic=self.dic_test_idx, in_channel=self.in_channel,
                                    root_dir=self.data_path,
                                    mode='val',
                                    transform=transforms.Compose([
                                        # Same deterministic resize as training,
                                        # but no shuffling below.
                                        transforms.Scale([224,224]),
                                        transforms.ToTensor(),
                                    ]))
    # Python 2 print statements: this module targets Python 2.
    print '==> Validation data :',len(validation_set),' frames',validation_set[1][1].size()
    #print validation_set[1]
    val_loader = DataLoader(
        dataset=validation_set,
        batch_size=self.BATCH_SIZE,
        shuffle=False,
        num_workers=self.num_workers)
    return val_loader
def buildData(self, srcBatch, goldBatch):
    """Convert raw source/target batches into an onmt.Dataset.

    Text sources become vocabulary indices; image sources are loaded from
    opt.src_img_dir and converted to tensors.
    """
    # This needs to be the same as preprocess.py.
    if self._type == "text":
        srcData = [self.src_dict.convertToIdx(b,
                                              onmt.Constants.UNK_WORD)
                   for b in srcBatch]
    elif self._type == "img":
        srcData = [transforms.ToTensor()(
            Image.open(self.opt.src_img_dir + "/" + b[0]))
            for b in srcBatch]
    tgtData = None
    if goldBatch:
        # Targets are wrapped with BOS/EOS sentinel tokens.
        tgtData = [self.tgt_dict.convertToIdx(b,
                                              onmt.Constants.UNK_WORD,
                                              onmt.Constants.BOS_WORD,
                                              onmt.Constants.EOS_WORD)
                   for b in goldBatch]
    # volatile=True: inference-only (legacy pre-0.4 PyTorch API).
    return onmt.Dataset(srcData, tgtData,
                        self.opt.batch_size, self.opt.cuda,
                        volatile=True,
                        data_type=self._type)
def init_learning(self, model, criterion):
    """Install default train/val transforms (if unset) and reset the best score.

    NOTE(review): `criterion` is accepted but unused here -- confirm whether
    callers rely on it being stored elsewhere.
    """
    if self._state('train_transform') is None:
        # Normalization statistics come from the model itself.
        normalize = transforms.Normalize(mean=model.image_normalization_mean,
                                         std=model.image_normalization_std)
        self.state['train_transform'] = transforms.Compose([
            Warp(self.state['image_size']),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])
    if self._state('val_transform') is None:
        normalize = transforms.Normalize(mean=model.image_normalization_mean,
                                         std=model.image_normalization_std)
        # Validation path: no random flip.
        self.state['val_transform'] = transforms.Compose([
            Warp(self.state['image_size']),
            transforms.ToTensor(),
            normalize,
        ])
    self.state['best_score'] = 0
def train(rank, args, model):
    """Train `model` on MNIST in one worker process, seed offset by `rank`."""
    torch.manual_seed(args.seed + rank)
    # Canonical MNIST normalization; shared by both splits.
    mnist_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,)),
    ])
    train_loader = torch.utils.data.DataLoader(
        datasets.MNIST('../data', train=True, download=True,
                       transform=mnist_transform),
        batch_size=args.batch_size, shuffle=True, num_workers=1)
    test_loader = torch.utils.data.DataLoader(
        datasets.MNIST('../data', train=False, transform=mnist_transform),
        batch_size=args.batch_size, shuffle=True, num_workers=1)
    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
    for epoch in range(1, args.epochs + 1):
        train_epoch(epoch, args, model, train_loader, optimizer)
        test_epoch(model, test_loader)
def imagenet():
    """Return transforms and metadata for ImageNet (ILSVRC 2012)."""
    channel_stats = dict(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225])
    # Heavy augmentation, applied twice per sample via TransformTwice.
    augmentation = [
        transforms.RandomRotation(10),
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ColorJitter(brightness=0.4, contrast=0.4,
                               saturation=0.4, hue=0.1),
        transforms.ToTensor(),
        transforms.Normalize(**channel_stats),
    ]
    train_transformation = data.TransformTwice(transforms.Compose(augmentation))
    eval_transformation = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(**channel_stats),
    ])
    return {
        'train_transformation': train_transformation,
        'eval_transformation': eval_transformation,
        'datadir': 'data-local/images/ilsvrc2012/',
        'num_classes': 1000,
    }
def cifar10():
    """Return transforms and metadata for CIFAR-10 (by-image layout)."""
    channel_stats = dict(mean=[0.4914, 0.4822, 0.4465],
                         std=[0.2470, 0.2435, 0.2616])
    # Translate-with-reflect + flip, applied twice per sample.
    augmentation = [
        data.RandomTranslateWithReflect(4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(**channel_stats),
    ]
    train_transformation = data.TransformTwice(transforms.Compose(augmentation))
    eval_transformation = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(**channel_stats),
    ])
    return {
        'train_transformation': train_transformation,
        'eval_transformation': eval_transformation,
        'datadir': 'data-local/images/cifar/cifar10/by-image',
        'num_classes': 10,
    }
def imagenet_transform(scale_size=256, input_size=224, train=True,
                       allow_var_size=False):
    """ImageNet preprocessing: augmented crop for train, center crop for eval."""
    normalize = {'mean': [0.485, 0.456, 0.406],
                 'std': [0.229, 0.224, 0.225]}
    tail = [transforms.ToTensor(), transforms.Normalize(**normalize)]
    if train:
        head = [transforms.Scale(scale_size),
                transforms.RandomCrop(input_size),
                transforms.RandomHorizontalFlip()]
    elif allow_var_size:
        # Keep variable spatial size: scale only, no crop.
        head = [transforms.Scale(scale_size)]
    else:
        head = [transforms.Scale(scale_size),
                transforms.CenterCrop(input_size)]
    return transforms.Compose(head + tail)
def __init__(self, Nj, gpu, model_file, filename):
    """Load an AlexNet pose model and the dataset to run it on.

    Args:
        Nj: number of joints the model predicts.
        gpu: device index; negative selects CPU.
        model_file: path to the saved state dict.
        filename: dataset list file for PoseDataset.
    """
    # validate arguments.
    self.gpu = (gpu >= 0)
    if self.gpu and not torch.cuda.is_available():
        raise GPUNotFoundError('GPU is not found.')
    # initialize model to estimate.
    self.model = AlexNet(Nj)
    self.model.load_state_dict(torch.load(model_file))
    # prepare gpu.
    if self.gpu:
        self.model.cuda()
    # load dataset to estimate (with augmentation enabled on the crop).
    self.dataset = PoseDataset(
        filename,
        input_transform=transforms.Compose([
            transforms.ToTensor(),
            RandomNoise()]),
        output_transform=Scale(),
        transform=Crop(data_augmentation=True))
def __init__(self, Nj, gpu, model_file, filename):
    """Load an AlexNet pose model and the dataset to run it on.

    Same setup as the augmented variant, but Crop() is constructed without
    data augmentation.

    Args:
        Nj: number of joints the model predicts.
        gpu: device index; negative selects CPU.
        model_file: path to the saved state dict.
        filename: dataset list file for PoseDataset.
    """
    # validate arguments.
    self.gpu = (gpu >= 0)
    if self.gpu and not torch.cuda.is_available():
        raise GPUNotFoundError('GPU is not found.')
    # initialize model to estimate.
    self.model = AlexNet(Nj)
    self.model.load_state_dict(torch.load(model_file))
    # prepare gpu.
    if self.gpu:
        self.model.cuda()
    # load dataset to estimate.
    self.dataset = PoseDataset(
        filename,
        input_transform=transforms.Compose([
            transforms.ToTensor(),
            RandomNoise()]),
        output_transform=Scale(),
        transform=Crop())
def get_transform(opt):
    """Assemble the preprocessing pipeline chosen by opt.resize_or_crop."""
    pipeline = []
    mode = opt.resize_or_crop
    if mode == 'resize_and_crop':
        pipeline += [transforms.Scale([opt.loadSize, opt.loadSize], Image.BICUBIC),
                     transforms.RandomCrop(opt.fineSize)]
    elif mode == 'crop':
        pipeline += [transforms.RandomCrop(opt.fineSize)]
    elif mode == 'scale_width':
        pipeline += [transforms.Lambda(lambda img: __scale_width(img, opt.fineSize))]
    elif mode == 'scale_width_and_crop':
        pipeline += [transforms.Lambda(lambda img: __scale_width(img, opt.loadSize)),
                     transforms.RandomCrop(opt.fineSize)]
    if opt.isTrain and not opt.no_flip:
        pipeline += [transforms.RandomHorizontalFlip()]
    # Tensor conversion, then map [0, 1] to [-1, 1].
    pipeline += [transforms.ToTensor(),
                 transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
    return transforms.Compose(pipeline)
def __init__(self, batch_size):
    """Create shuffled train/test MNIST loaders with plain ToTensor inputs."""
    self.batch_size = batch_size
    # ToTensor is stateless, so one instance can serve both datasets.
    to_tensor = transforms.ToTensor()
    train_dataset = dsets.MNIST(root="./data", train=True,
                                transform=to_tensor, download=True)
    test_dataset = dsets.MNIST(root="./data", train=False,
                               transform=to_tensor)
    self.train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                                    batch_size=batch_size,
                                                    shuffle=True)
    self.test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                                   batch_size=batch_size,
                                                   shuffle=True)
def load_image_for_prediction(opt, image_path):
    """Load an image and preprocess it as in training; return the model's
    expected input dict (the same tensor is used for both 'A' and 'B').

    :param opt: options object providing loadSize and fineSize
    :param image_path: path of the image to load
    :return: dict with 'A'/'B' tensors and 'A_paths'/'B_paths'
    """
    preprocess = transforms.Compose([
        transforms.Scale(opt.loadSize),
        transforms.RandomCrop(opt.fineSize),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])
    image = Image.open(image_path)
    tensor = preprocess(image).float().unsqueeze(0)  # add batch dim
    return {'A': tensor, 'A_paths': image_path,
            'B': tensor, 'B_paths': image_path}
def get_loaders(loader_batchsize, **kwargs):
    """Return (train_loader, test_loader) for the dataset named in arguments.data.

    NOTE(review): only 'mnist' is handled; any other value leaves the loader
    variables unbound and the return raises UnboundLocalError -- confirm intent.
    """
    arguments = kwargs['arguments']
    data = arguments.data
    if data == 'mnist':
        # Worker/pin-memory options only when CUDA is in use.
        kwargs = {'num_workers': 1, 'pin_memory': True} if arguments.cuda else {}
        train_loader = torch.utils.data.DataLoader(
            datasets.MNIST('../data', train=True, download=True,
                           transform=transforms.Compose([
                               transforms.ToTensor(),
                               #transforms.Normalize((0,), (1,))
                           ])),
            batch_size=loader_batchsize, shuffle=True, **kwargs)
        test_loader = torch.utils.data.DataLoader(
            datasets.MNIST('../data', train=False,
                           transform=transforms.Compose([
                               transforms.ToTensor(),
                               #transforms.Normalize((7,), (0.3081,))
                           ])),
            batch_size=loader_batchsize, shuffle=True, **kwargs)
    return train_loader, test_loader
def LSUN_loader(root, image_size, classes=['bedroom'], normalize=True):
    """
    Function to load torchvision dataset object based on just image size
    Args:
        root = If your dataset is downloaded and ready to use, mention the
               location of this folder. Else, the dataset will be downloaded
               to this location
        image_size = Size of every image
        classes = Default class is 'bedroom'. Other available classes are:
                  'bridge', 'church_outdoor', 'classroom', 'conference_room',
                  'dining_room', 'kitchen', 'living_room', 'restaurant', 'tower'
        normalize = Requirement to normalize the image. Default is true
    """
    # (classes is a mutable default, but it is never mutated here.)
    transformations = [transforms.Scale(image_size),
                       transforms.CenterCrop(image_size),
                       transforms.ToTensor()]
    if normalize == True:
        # Map [0, 1] tensors to [-1, 1].
        transformations.append(transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)))
    # BUG FIX: the original loop only rebound the loop variable
    # (`c = c + '_train'`) without updating the list, so the '_train' suffix
    # that torchvision's LSUN dataset expects was never applied.
    train_classes = [c + '_train' for c in classes]
    lsun_data = dset.LSUN(db_path=root, classes=train_classes,
                          transform=transforms.Compose(transformations))
    return lsun_data
def test():
    """Smoke-test ListDataset: load one VOC batch, print shapes, save a grid."""
    import torchvision
    transform = transforms.Compose([
        transforms.ToTensor(),
        # ImageNet channel statistics.
        transforms.Normalize((0.485,0.456,0.406), (0.229,0.224,0.225))
    ])
    dataset = ListDataset(root='/mnt/hgfs/D/download/PASCAL_VOC/voc_all_images',
                          list_file='./data/voc12_train.txt', train=True,
                          transform=transform, input_size=600)
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=8, shuffle=False,
                                             num_workers=1,
                                             collate_fn=dataset.collate_fn)
    for images, loc_targets, cls_targets in dataloader:
        print(images.size())
        print(loc_targets.size())
        print(cls_targets.size())
        grid = torchvision.utils.make_grid(images, 1)
        torchvision.utils.save_image(grid, 'a.jpg')
        break  # one batch is enough for a smoke test

# test()
def data_loader(image_root, data_list, shuffle=True, batch_size=64, workers=20,
                is_cuda=True, is_visualization=False):
    """Build a DataLoader over a person re-id list dataset.

    NOTE(review): the `shuffle` parameter is forwarded to listDataset while
    the DataLoader below always uses shuffle=True -- confirm that is intended.
    """
    kwargs = {'num_workers': workers, 'pin_memory': True} if is_cuda else {}
    # Crop the person region, then scale to the fixed 64x128 input size.
    transform = transforms.Compose([
        trans.person_crop(ratio=(1, 0.75), crop_type=1),
        trans.scale(size=(64, 128)),
        transforms.ToTensor()
    ])
    preid = dataset.listDataset(image_root, data_list, shuffle,
                                transform=transform,
                                is_visualization=is_visualization)
    data_loader = torch.utils.data.DataLoader(preid, batch_size=batch_size,
                                              shuffle=True, **kwargs)
    return data_loader
def get_transform(resize_crop='resize_and_crop', flip=True,
                  loadSize=286, fineSize=256):
    """Build the resize/crop/flip preprocessing pipeline for the given mode."""
    ops = []
    if resize_crop == 'resize_and_crop':
        ops.append(transforms.Resize([loadSize, loadSize], Image.BICUBIC))
        ops.append(transforms.RandomCrop(fineSize))
    elif resize_crop == 'crop':
        ops.append(transforms.RandomCrop(fineSize))
    elif resize_crop == 'scale_width':
        ops.append(transforms.Lambda(lambda img: __scale_width(img, fineSize)))
    elif resize_crop == 'scale_width_and_crop':
        ops.append(transforms.Lambda(lambda img: __scale_width(img, loadSize)))
        ops.append(transforms.RandomCrop(fineSize))
    if flip:
        ops.append(transforms.RandomHorizontalFlip())
    # Tensor conversion, then map [0, 1] to [-1, 1].
    ops += [transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
    return transforms.Compose(ops)
def transform(is_train=True, normalize=True):
    """Build the train/eval transform: scale, crop, optional flip, tensor,
    and optional ImageNet normalization."""
    crop = RandomCrop(224) if is_train else CenterCrop(224)
    filters = [Scale(256), crop]
    if is_train:
        filters.append(RandomHorizontalFlip())
    filters.append(ToTensor())
    if normalize:
        filters.append(Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225]))
    return Compose(filters)
def __init__(self, crop_size=128, y_offset=15, flip=False):
    """Store crop parameters and build the post-crop transform pipeline."""
    self.crop_size = crop_size
    self.y_offset = y_offset
    self.flip = flip
    # Optional horizontal flip, then the shared resize/normalize tail.
    steps = []
    if self.flip:
        steps.append(transforms.RandomHorizontalFlip())
    steps += [
        transforms.Scale(size=224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ]
    self.post_transform = transforms.Compose(steps)
def test(argv=sys.argv[1:]): input = "../dataset/BSDS300/images/val/54082.jpg" #input = "../dataset/BSDS300/images/val/159008.jpg" output = "sr_{}".format(basename(input)) # save in cwd output2 = "sr__{}".format(basename(input)) model = "snapshot/gnet-epoch-1-pretrain.pth" #model = "snapshot/gnet-epoch-200.pth" cuda = True img = Image.open(input) width, height = img.size gennet = torch.load(model) img = ToTensor()(img) # [c,w,h]->[1,c,h,w] input = Variable(img).view(1, 3, height, width) if cuda: gennet = gennet.cuda() input = input.cuda() pred = gennet(input).cpu() save_image(pred.data, output) #ToPILImage()(pred.data).save(output) toImage(pred).save(output2)
def transform_input(crop_size, upscale_factor):
    """LR of target image """
    # Downscale by the upscale factor to produce the low-resolution input.
    return Compose([
        Scale(crop_size // upscale_factor),
    ])

# def transform_target_batch(crop_size):
#     def transform(image):
#         patches = extract_subimages(image, crop_size, crop_size)
#         patches = [ToTensor()(x) for x in patches]
#         return stack(patches, 0)
#     return transform

# def transform_input_batch(crop_size, upscale_factor):
#     def transform(image):
#         patches = extract_subimages(image, crop_size, crop_size)
#         patches = [Compose([Scale(crop_size//upscale_factor), ToTensor()])(x) for x in patches]
#         return stack(patches, 0)
#     return transform
def __init__(self, size, interpolation=Image.BILINEAR):
    """Store the target size, resampling mode, and a reusable tensor converter."""
    self.size = size
    self.interpolation = interpolation
    # ToTensor is stateless, so one instance can be reused for every image.
    self.toTensor = transforms.ToTensor()
def load_data(resize):
    """Return (train_loader, val_loader) for the PlantVillage image folders.

    `resize` is a size tuple; its largest side drives the crop size.
    Relies on a module-level `batch_size`.
    """
    side = max(resize)
    data_transforms = {
        'train': transforms.Compose([
            transforms.RandomSizedCrop(side),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),
        'val': transforms.Compose([
            # Higher scale-up for inception
            transforms.Scale(int(side / 224 * 256)),
            transforms.CenterCrop(side),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]),
    }
    data_dir = 'PlantVillage'
    dsets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x])
             for x in ['train', 'val']}
    dset_loaders = {x: torch.utils.data.DataLoader(dsets[x],
                                                   batch_size=batch_size,
                                                   shuffle=True)
                    for x in ['train', 'val']}
    # Removed unused `dset_sizes`/`dset_classes` locals: they were computed
    # on every call but never returned or used.
    return dset_loaders['train'], dset_loaders['val']
def __init__(self, env):
    """Wrap a CartPole env and prepare the screen-resize transform."""
    super(CartPoleWrapper, self).__init__()
    # Unwrap to reach the raw environment internals (state, rendering).
    self.env = env.unwrapped
    # PIL round-trip: tensor/array -> PIL image -> 40px resize -> tensor.
    self.resize = T.Compose([T.ToPILImage(),
                             T.Scale(40, interpolation=Image.CUBIC),
                             T.ToTensor()])
    self.screen_width = 600
    self.action_space = self.env.action_space
def scale_crop(input_size, scale_size=None, normalize=__imagenet_stats):
    """Center-crop eval pipeline; optionally scale first.

    BUG FIX: with the default scale_size=None, the original compared
    None != input_size (always true) and prepended Scale(None), which is
    invalid. The Scale step is now added only when a real scale_size is
    given and differs from input_size.
    """
    t_list = [
        transforms.CenterCrop(input_size),
        transforms.ToTensor(),
        transforms.Normalize(**normalize),
    ]
    if scale_size is not None and scale_size != input_size:
        t_list = [transforms.Scale(scale_size)] + t_list
    return transforms.Compose(t_list)
def scale_random_crop(input_size, scale_size=None, normalize=__imagenet_stats):
    """Random-crop training pipeline; optionally scale first.

    Fixes two defects in the original:
      * the final Compose was built but never returned (the function
        returned None);
      * the default scale_size=None compared unequal to input_size and
        prepended Scale(None), which is invalid.
    """
    t_list = [
        transforms.RandomCrop(input_size),
        transforms.ToTensor(),
        transforms.Normalize(**normalize),
    ]
    if scale_size is not None and scale_size != input_size:
        t_list = [transforms.Scale(scale_size)] + t_list
    return transforms.Compose(t_list)
def pad_random_crop(input_size, scale_size=None, normalize=__imagenet_stats):
    """Pad-then-random-crop training pipeline with horizontal flips.

    BUG FIX: with the default scale_size=None the original computed
    int((None - input_size) / 2) and raised TypeError; None is now treated
    as "no padding".
    """
    padding = 0 if scale_size is None else int((scale_size - input_size) / 2)
    return transforms.Compose([
        transforms.RandomCrop(input_size, padding=padding),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(**normalize),
    ])
def inception_preproccess(input_size, normalize=__imagenet_stats):
    """Inception-style training augmentation: random sized crop + flip,
    then tensor conversion and normalization."""
    steps = [
        transforms.RandomSizedCrop(input_size),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(**normalize),
    ]
    return transforms.Compose(steps)
def CreateDataLoader(opt): random.seed(opt.manualSeed) # folder dataset CTrans = transforms.Compose([ transforms.Scale(opt.imageSize, Image.BICUBIC), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) ]) VTrans = transforms.Compose([ RandomSizedCrop(opt.imageSize // 4, Image.BICUBIC), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) ]) STrans = transforms.Compose([ transforms.Scale(opt.imageSize, Image.BICUBIC), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) ]) dataset = ImageFolder(rootC=opt.datarootC, rootS=opt.datarootS, transform=CTrans, vtransform=VTrans, stransform=STrans ) assert dataset return data.DataLoader(dataset, batch_size=opt.batchSize, shuffle=True, num_workers=int(opt.workers), drop_last=True)
def CreateDataLoader(opt): random.seed(opt.manualSeed) # folder dataset CTrans = transforms.Compose([ transforms.Scale(opt.imageSize, Image.BICUBIC), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) ]) VTrans = transforms.Compose([ RandomSizedCrop(224, Image.BICUBIC), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) ]) STrans = transforms.Compose([ transforms.Scale(opt.imageSize, Image.BICUBIC), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) ]) dataset = ImageFolder(rootC=opt.datarootC, rootS=opt.datarootS, transform=CTrans, vtransform=VTrans, stransform=STrans ) assert dataset return data.DataLoader(dataset, batch_size=opt.batchSize, shuffle=True, num_workers=int(opt.workers), drop_last=True)