我们从Python开源项目中,提取了以下36个代码示例,用于说明如何使用torchvision.transforms.RandomHorizontalFlip()。
def load_labels(data_dir, resize=(224, 224)):
    """Return the class names found in ``<data_dir>/train``.

    Args:
        data_dir: dataset root containing a ``train`` sub-folder laid out
            for ``datasets.ImageFolder`` (one sub-directory per class).
        resize: (h, w) pair; ``max(resize)`` is used as the crop size.

    Returns:
        List of class-name strings, in ``ImageFolder`` order.
    """
    data_transforms = {
        'train': transforms.Compose([
            # RandomResizedCrop is the current name of the deprecated
            # RandomSizedCrop alias.
            transforms.RandomResizedCrop(max(resize)),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            # ImageNet channel statistics.
            transforms.Normalize([0.485, 0.456, 0.406],
                                 [0.229, 0.224, 0.225]),
        ])
    }
    # Use the loop variable for the sub-folder instead of a hard-coded
    # 'train' literal so the comprehension stays correct if more splits
    # are ever added to the list.
    dsets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
                                     data_transforms[x])
             for x in ['train']}
    return dsets['train'].classes
def get_transform(opt):
    """Build the preprocessing pipeline described by ``opt``.

    ``opt`` fields read: resize_or_crop, loadSizeX, loadSizeY, fineSize,
    isTrain, no_flip.

    Returns:
        A ``transforms.Compose`` ending in ToTensor + Normalize to [-1, 1].
    """
    transform_list = []
    if opt.resize_or_crop == 'resize_and_crop':
        osize = [opt.loadSizeX, opt.loadSizeY]
        # Resize supersedes the deprecated Scale alias (same behaviour).
        transform_list.append(transforms.Resize(osize, Image.BICUBIC))
        transform_list.append(transforms.RandomCrop(opt.fineSize))
    elif opt.resize_or_crop == 'crop':
        transform_list.append(transforms.RandomCrop(opt.fineSize))
    elif opt.resize_or_crop == 'scale_width':
        transform_list.append(transforms.Lambda(
            lambda img: __scale_width(img, opt.fineSize)))
    elif opt.resize_or_crop == 'scale_width_and_crop':
        transform_list.append(transforms.Lambda(
            lambda img: __scale_width(img, opt.loadSizeX)))
        transform_list.append(transforms.RandomCrop(opt.fineSize))

    # Horizontal flip is a train-time augmentation only, and can be
    # disabled explicitly via opt.no_flip.
    if opt.isTrain and not opt.no_flip:
        transform_list.append(transforms.RandomHorizontalFlip())

    transform_list += [transforms.ToTensor(),
                       transforms.Normalize((0.5, 0.5, 0.5),
                                            (0.5, 0.5, 0.5))]
    return transforms.Compose(transform_list)
def dataLoader(is_train=True, cuda=True, batch_size=64, shuffle=True):
    """Return a CIFAR-100 DataLoader and the dataset size.

    Args:
        is_train: build the augmented training split when True, else the
            test split.
        cuda: unused here; kept for interface compatibility.
        batch_size: mini-batch size.
        shuffle: whether the loader reshuffles every epoch.

    Returns:
        (loader, size) tuple.
    """
    # Per-channel CIFAR-100 statistics, rescaled from [0, 255] to [0, 1].
    normalize = transforms.Normalize(
        mean=[n / 255. for n in [129.3, 124.1, 112.4]],
        std=[n / 255. for n in [68.2, 65.4, 70.4]])
    if is_train:
        trans = transforms.Compose([
            transforms.RandomHorizontalFlip(),
            transforms.RandomCrop(32, padding=4),
            transforms.ToTensor(),
            normalize,
        ])
        dataset = td.CIFAR100('data', train=True, transform=trans)
    else:
        trans = transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ])
        dataset = td.CIFAR100('data', train=False, transform=trans)
    # len(dataset) works on every torchvision version; the old
    # train_labels / test_labels attributes were deprecated and later
    # removed from torchvision's CIFAR datasets.
    size = len(dataset)
    loader = torch.utils.data.DataLoader(
        dataset, batch_size=batch_size, shuffle=shuffle)
    return loader, size
def train(self):
    """Build and return the DataLoader over the training frames."""
    training_set = spatial_dataset(
        dic=self.dic_training,
        root_dir=self.data_path,
        mode='train',
        transform=transforms.Compose([
            transforms.RandomCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            # ImageNet channel statistics.
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225]),
        ]))
    # print() function form: the original Python-2-only print statements
    # are a SyntaxError on Python 3.
    print('==> Training data : {} frames'.format(len(training_set)))
    print(training_set[1][0]['img1'].size())
    train_loader = DataLoader(
        dataset=training_set,
        batch_size=self.BATCH_SIZE,
        shuffle=True,
        num_workers=self.num_workers)
    return train_loader
def init_learning(self, model, criterion):
    """Initialise the train/val transforms and the best-score tracker.

    Transforms are created only when absent, so transforms supplied by
    the caller beforehand are preserved.
    """
    if self._state('train_transform') is None:
        normalize = transforms.Normalize(
            mean=model.image_normalization_mean,
            std=model.image_normalization_std)
        # Training pipeline: warp to the target size, then augment.
        self.state['train_transform'] = transforms.Compose([
            Warp(self.state['image_size']),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])

    if self._state('val_transform') is None:
        normalize = transforms.Normalize(
            mean=model.image_normalization_mean,
            std=model.image_normalization_std)
        # Validation pipeline: deterministic (no augmentation).
        self.state['val_transform'] = transforms.Compose([
            Warp(self.state['image_size']),
            transforms.ToTensor(),
            normalize,
        ])

    self.state['best_score'] = 0
def imagenet():
    """Return the transform/config dictionary for ImageNet (ILSVRC 2012)."""
    stats = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

    # Heavy train-time augmentation, applied twice per sample by
    # TransformTwice (two stochastic views of the same image).
    augment = [
        transforms.RandomRotation(10),
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ColorJitter(brightness=0.4, contrast=0.4,
                               saturation=0.4, hue=0.1),
        transforms.ToTensor(),
        transforms.Normalize(**stats),
    ]
    # Deterministic evaluation pipeline.
    evaluate = [
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(**stats),
    ]

    return {
        'train_transformation': data.TransformTwice(transforms.Compose(augment)),
        'eval_transformation': transforms.Compose(evaluate),
        'datadir': 'data-local/images/ilsvrc2012/',
        'num_classes': 1000,
    }
def cifar10():
    """Return the transform/config dictionary for CIFAR-10."""
    stats = dict(mean=[0.4914, 0.4822, 0.4465],
                 std=[0.2470, 0.2435, 0.2616])

    # Train-time augmentation, applied twice per sample by TransformTwice.
    augment = transforms.Compose([
        data.RandomTranslateWithReflect(4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(**stats),
    ])
    # Deterministic evaluation pipeline.
    evaluate = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(**stats),
    ])

    return {
        'train_transformation': data.TransformTwice(augment),
        'eval_transformation': evaluate,
        'datadir': 'data-local/images/cifar/cifar10/by-image',
        'num_classes': 10,
    }
def imagenet_transform(scale_size=256, input_size=224, train=True,
                       allow_var_size=False):
    """Return the ImageNet preprocessing pipeline.

    Args:
        scale_size: target size of the shorter side for the initial resize.
        input_size: final (square) crop size; ignored at eval time when
            ``allow_var_size`` is True.
        train: build the augmented training pipeline when True.
        allow_var_size: at eval time, skip the centre crop so images keep
            variable sizes.
    """
    normalize = {'mean': [0.485, 0.456, 0.406],
                 'std': [0.229, 0.224, 0.225]}
    if train:
        return transforms.Compose([
            # Resize supersedes the deprecated Scale alias.
            transforms.Resize(scale_size),
            transforms.RandomCrop(input_size),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(**normalize),
        ])
    if allow_var_size:
        return transforms.Compose([
            transforms.Resize(scale_size),
            transforms.ToTensor(),
            transforms.Normalize(**normalize),
        ])
    return transforms.Compose([
        transforms.Resize(scale_size),
        transforms.CenterCrop(input_size),
        transforms.ToTensor(),
        transforms.Normalize(**normalize),
    ])
def get_transform(opt):
    """Build the preprocessing pipeline described by ``opt``.

    ``opt`` fields read: resize_or_crop, loadSize, fineSize, isTrain,
    no_flip.

    Returns:
        A ``transforms.Compose`` ending in ToTensor + Normalize to [-1, 1].
    """
    transform_list = []
    if opt.resize_or_crop == 'resize_and_crop':
        osize = [opt.loadSize, opt.loadSize]
        # Resize supersedes the deprecated Scale alias (same behaviour).
        transform_list.append(transforms.Resize(osize, Image.BICUBIC))
        transform_list.append(transforms.RandomCrop(opt.fineSize))
    elif opt.resize_or_crop == 'crop':
        transform_list.append(transforms.RandomCrop(opt.fineSize))
    elif opt.resize_or_crop == 'scale_width':
        transform_list.append(transforms.Lambda(
            lambda img: __scale_width(img, opt.fineSize)))
    elif opt.resize_or_crop == 'scale_width_and_crop':
        transform_list.append(transforms.Lambda(
            lambda img: __scale_width(img, opt.loadSize)))
        transform_list.append(transforms.RandomCrop(opt.fineSize))

    # Horizontal flip is a train-time augmentation only, and can be
    # disabled explicitly via opt.no_flip.
    if opt.isTrain and not opt.no_flip:
        transform_list.append(transforms.RandomHorizontalFlip())

    transform_list += [transforms.ToTensor(),
                       transforms.Normalize((0.5, 0.5, 0.5),
                                            (0.5, 0.5, 0.5))]
    return transforms.Compose(transform_list)
def get_transform(resize_crop='resize_and_crop', flip=True,
                  loadSize=286, fineSize=256):
    """Compose a pix2pix-style preprocessing pipeline.

    Args:
        resize_crop: one of 'resize_and_crop', 'crop', 'scale_width',
            'scale_width_and_crop'.
        flip: include random horizontal flipping when True.
        loadSize: intermediate resize size.
        fineSize: final crop size.
    """
    steps = []
    if resize_crop == 'resize_and_crop':
        steps.append(transforms.Resize([loadSize, loadSize], Image.BICUBIC))
        steps.append(transforms.RandomCrop(fineSize))
    elif resize_crop == 'crop':
        steps.append(transforms.RandomCrop(fineSize))
    elif resize_crop == 'scale_width':
        steps.append(transforms.Lambda(
            lambda img: __scale_width(img, fineSize)))
    elif resize_crop == 'scale_width_and_crop':
        steps.append(transforms.Lambda(
            lambda img: __scale_width(img, loadSize)))
        steps.append(transforms.RandomCrop(fineSize))

    if flip:
        steps.append(transforms.RandomHorizontalFlip())

    # Convert to tensor and normalise each channel to [-1, 1].
    steps.append(transforms.ToTensor())
    steps.append(transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)))
    return transforms.Compose(steps)
def transform(is_train=True, normalize=True):
    """Return a transform pipeline: resize to 256, 224 crop, optional
    flip (train only) and optional ImageNet normalisation."""
    ops = [Scale(256)]
    if is_train:
        # Training: random crop plus horizontal flip augmentation.
        ops.append(RandomCrop(224))
        ops.append(RandomHorizontalFlip())
    else:
        # Evaluation: deterministic centre crop.
        ops.append(CenterCrop(224))
    ops.append(ToTensor())
    if normalize:
        ops.append(Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]))
    return Compose(ops)
def __init__(self, crop_size = 128, y_offset = 15, flip = False):
    """Store crop parameters and build the post-crop transform.

    Args:
        crop_size: side length of the crop taken by the caller.
        y_offset: vertical offset used by the caller's cropping logic.
        flip: include random horizontal flipping (train-time) when True.
    """
    self.crop_size = crop_size
    self.y_offset = y_offset
    self.flip = flip

    # The two variants share the same tail; only the optional flip at
    # the front differs.
    steps = [transforms.RandomHorizontalFlip()] if self.flip else []
    steps += [
        transforms.Scale(size=224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406],
                             [0.229, 0.224, 0.225]),
    ]
    self.post_transform = transforms.Compose(steps)
def load_data(resize):
    """Build train/val DataLoaders for the PlantVillage dataset.

    Args:
        resize: (h, w) pair; ``max(resize)`` is used as the square crop
            size.

    Returns:
        (train_loader, val_loader) tuple.

    NOTE(review): relies on a module-level ``batch_size`` global — confirm
    it is defined before this is called.
    """
    imagenet_norm = transforms.Normalize([0.485, 0.456, 0.406],
                                         [0.229, 0.224, 0.225])
    data_transforms = {
        'train': transforms.Compose([
            # RandomResizedCrop is the current name of the deprecated
            # RandomSizedCrop alias.
            transforms.RandomResizedCrop(max(resize)),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            imagenet_norm,
        ]),
        'val': transforms.Compose([
            # Higher scale-up for Inception-sized inputs
            # (Resize supersedes the deprecated Scale alias).
            transforms.Resize(int(max(resize) / 224 * 256)),
            transforms.CenterCrop(max(resize)),
            transforms.ToTensor(),
            imagenet_norm,
        ]),
    }
    data_dir = 'PlantVillage'
    dsets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
                                     data_transforms[x])
             for x in ['train', 'val']}
    dset_loaders = {x: torch.utils.data.DataLoader(
        dsets[x], batch_size=batch_size, shuffle=True)
        for x in ['train', 'val']}
    return dset_loaders['train'], dset_loaders['val']
def pad_random_crop(input_size, scale_size=None, normalize=__imagenet_stats):
    """Pad-then-random-crop training pipeline.

    Args:
        input_size: final crop size.
        scale_size: nominal padded size; defaults to ``input_size``
            (i.e. no padding) when omitted.
        normalize: keyword arguments for ``transforms.Normalize``.
    """
    # Guard the documented default: the old code raised TypeError
    # (None - int) when scale_size was left as None.
    if scale_size is None:
        scale_size = input_size
    padding = int((scale_size - input_size) / 2)
    return transforms.Compose([
        transforms.RandomCrop(input_size, padding=padding),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(**normalize),
    ])
def inception_preproccess(input_size, normalize=__imagenet_stats):
    """Inception-style training preprocessing: random resized crop + flip.

    Args:
        input_size: final crop size.
        normalize: keyword arguments for ``transforms.Normalize``.
    """
    return transforms.Compose([
        # RandomResizedCrop is the current name of the deprecated
        # RandomSizedCrop alias.
        transforms.RandomResizedCrop(input_size),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(**normalize),
    ])
def inception_color_preproccess(input_size, normalize=__imagenet_stats):
    """Inception-style training preprocessing with colour jitter and
    PCA lighting noise.

    Args:
        input_size: final crop size.
        normalize: keyword arguments for ``transforms.Normalize``.
    """
    return transforms.Compose([
        # RandomResizedCrop is the current name of the deprecated
        # RandomSizedCrop alias.
        transforms.RandomResizedCrop(input_size),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        # NOTE(review): ColorJitter/Lighting appear after ToTensor, so they
        # are presumably tensor-based project-local implementations (they
        # are not referenced via the transforms module) — keep the order.
        ColorJitter(
            brightness=0.4,
            contrast=0.4,
            saturation=0.4,
        ),
        Lighting(0.1, __imagenet_pca['eigval'], __imagenet_pca['eigvec']),
        transforms.Normalize(**normalize),
    ])
def get_cifar10_loaders(root_directory, train_batch_size=128,
                        test_batch_size=100, download=False):
    """Return (train, test) CIFAR-10 loaders with standard augmentation.

    Data preparation borrowed from
    https://github.com/kuangliu/pytorch-cifar/blob/master/main.py
    """
    normalize = transforms.Normalize((0.4914, 0.4822, 0.4465),
                                     (0.2023, 0.1994, 0.2010))
    train_pipeline = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ])
    test_pipeline = transforms.Compose([
        transforms.ToTensor(),
        normalize,
    ])

    data_dir = os.path.join(root_directory, 'data')
    train_set = torchvision.datasets.CIFAR10(
        root=data_dir, train=True, download=download,
        transform=train_pipeline)
    train_loader = torch.utils.data.DataLoader(
        train_set, batch_size=train_batch_size, shuffle=True, num_workers=2)

    test_set = torchvision.datasets.CIFAR10(
        root=data_dir, train=False, download=download,
        transform=test_pipeline)
    test_loader = torch.utils.data.DataLoader(
        test_set, batch_size=test_batch_size, shuffle=False, num_workers=2)

    return train_loader, test_loader
def scale_random_crop(input_size, scale_size=None, normalize=__imagenet_stats):
    """Random-crop training pipeline, optionally preceded by a resize.

    Args:
        input_size: final crop size.
        scale_size: when given and different from ``input_size``, images
            are first resized so their shorter side equals ``scale_size``.
        normalize: keyword arguments for ``transforms.Normalize``.
    """
    t_list = [
        transforms.RandomCrop(input_size),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(**normalize),
    ]
    # Guard the default: the old code compared None != input_size (True)
    # and prepended transforms.Scale(None), which fails at call time.
    if scale_size is not None and scale_size != input_size:
        t_list = [transforms.Scale(scale_size)] + t_list
    return transforms.Compose(t_list)
def pad_random_crop(input_size, scale_size=None, normalize=__imagenet_stats,
                    fill=0):
    """Pad with ``fill`` then random-crop/flip training pipeline.

    Args:
        input_size: final crop size.
        scale_size: nominal padded size; defaults to ``input_size``
            (i.e. zero padding) when omitted.
        normalize: keyword arguments for ``transforms.Normalize``.
        fill: padding fill value passed to ``transforms.Pad``.
    """
    # Guard the documented default: the old code raised TypeError
    # (None - int) when scale_size was left as None.
    if scale_size is None:
        scale_size = input_size
    padding = int((scale_size - input_size) / 2)
    return transforms.Compose([
        transforms.Pad(padding, fill=fill),
        transforms.RandomCrop(input_size),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(**normalize),
    ])
def __init__(self, args):
    """Create train/test DataLoaders over the MINC-2500 dataset.

    ``args`` fields read: cuda, batch_size, test_batch_size.
    """
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    train_pipeline = transforms.Compose([
        transforms.Resize(256),
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ColorJitter(0.4, 0.4, 0.4),
        transforms.ToTensor(),
        # PCA-based lighting noise (AlexNet-style augmentation).
        Lighting(0.1, _imagenet_pca['eigval'], _imagenet_pca['eigvec']),
        normalize,
    ])
    test_pipeline = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        normalize,
    ])

    root = os.path.expanduser('~/data/minc-2500/')
    train_set = MINCDataloder(root=root, train=True,
                              transform=train_pipeline)
    test_set = MINCDataloder(root=root, train=False,
                             transform=test_pipeline)

    # pin_memory speeds up host-to-GPU copies when CUDA is in use.
    kwargs = {'num_workers': 8, 'pin_memory': True} if args.cuda else {}
    self.trainloader = torch.utils.data.DataLoader(
        train_set, batch_size=args.batch_size, shuffle=True, **kwargs)
    self.testloader = torch.utils.data.DataLoader(
        test_set, batch_size=args.test_batch_size, shuffle=False, **kwargs)
def imagenet_like():
    """Return train/val/test transforms for ImageNet-like 299x299 input.

    NOTE(review): relies on a module-level ``normalize`` transform and on
    project-local ``ColorJitter``/``SpatialPick`` — confirm they are in
    scope.
    """
    crop_size = 299  # 224 for standard (non-Inception) ImageNet models
    train_transformations = transforms.Compose([
        # RandomResizedCrop is the current name of the deprecated
        # RandomSizedCrop alias.
        transforms.RandomResizedCrop(crop_size),
        transforms.RandomHorizontalFlip(),
        # Random vertical flip with p=0.5, written inline.
        lambda img: img if random.random() < 0.5
        else img.transpose(Image.FLIP_TOP_BOTTOM),
        transforms.ToTensor(),
        ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
        normalize,
    ])
    val_transformations = transforms.Compose([
        transforms.CenterCrop(crop_size),
        transforms.ToTensor(),
        normalize,
    ])
    test_transformation = transforms.Compose([
        SpatialPick(),
        transforms.ToTensor(),
        normalize,
    ])
    return {'train': train_transformations,
            'val': val_transformations,
            'test': test_transformation}
def getLoader(datasetName, dataroot, originalSize, imageSize, batchSize=64,
              workers=4, mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
              split='train', shuffle=True, seed=None):
    """Build a DataLoader over an image folder.

    The train split gets random crop + flip augmentation; every other
    split uses a deterministic centre crop.
    """
    from datasets.folder import ImageFolder as commonDataset
    import torchvision.transforms as transforms

    if split == 'train':
        pipeline = transforms.Compose([
            transforms.Scale(originalSize),
            transforms.RandomCrop(imageSize),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mean, std),
        ])
    else:
        pipeline = transforms.Compose([
            transforms.Scale(originalSize),
            transforms.CenterCrop(imageSize),
            transforms.ToTensor(),
            transforms.Normalize(mean, std),
        ])

    dataset = commonDataset(root=dataroot, transform=pipeline, seed=seed)
    assert dataset
    return torch.utils.data.DataLoader(dataset,
                                       batch_size=batchSize,
                                       shuffle=shuffle,
                                       num_workers=int(workers))
def get(batch_size, data_root='/mnt/local0/public_dataset/pytorch/', train=True, val=True, **kwargs):
    """Return STL-10 loader(s): train, test, or [train, test]."""
    data_root = os.path.expanduser(os.path.join(data_root, 'stl10-data'))
    num_workers = kwargs.setdefault('num_workers', 1)
    kwargs.pop('input_size', None)
    print("Building STL10 data loader with {} workers".format(num_workers))

    norm = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    loaders = []
    if train:
        # Training split: pad + random crop + flip augmentation.
        train_pipeline = transforms.Compose([
            transforms.Pad(4),
            transforms.RandomCrop(96),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            norm,
        ])
        loaders.append(torch.utils.data.DataLoader(
            datasets.STL10(root=data_root, split='train', download=True,
                           transform=train_pipeline),
            batch_size=batch_size, shuffle=True, **kwargs))
    if val:
        # Test split: deterministic pipeline, no shuffling.
        loaders.append(torch.utils.data.DataLoader(
            datasets.STL10(root=data_root, split='test', download=True,
                           transform=transforms.Compose([
                               transforms.ToTensor(), norm])),
            batch_size=batch_size, shuffle=False, **kwargs))
    # Unwrap when only one loader was requested.
    return loaders[0] if len(loaders) == 1 else loaders
def get10(batch_size, data_root='/tmp/public_dataset/pytorch', train=True, val=True, **kwargs):
    """Return CIFAR-10 loader(s): train, test, or [train, test]."""
    data_root = os.path.expanduser(os.path.join(data_root, 'cifar10-data'))
    num_workers = kwargs.setdefault('num_workers', 1)
    kwargs.pop('input_size', None)
    print("Building CIFAR-10 data loader with {} workers".format(num_workers))

    norm = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    loaders = []
    if train:
        # Training split: pad + random crop + flip augmentation.
        train_pipeline = transforms.Compose([
            transforms.Pad(4),
            transforms.RandomCrop(32),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            norm,
        ])
        loaders.append(torch.utils.data.DataLoader(
            datasets.CIFAR10(root=data_root, train=True, download=True,
                             transform=train_pipeline),
            batch_size=batch_size, shuffle=True, **kwargs))
    if val:
        # Test split: deterministic pipeline, no shuffling.
        loaders.append(torch.utils.data.DataLoader(
            datasets.CIFAR10(root=data_root, train=False, download=True,
                             transform=transforms.Compose([
                                 transforms.ToTensor(), norm])),
            batch_size=batch_size, shuffle=False, **kwargs))
    # Unwrap when only one loader was requested.
    return loaders[0] if len(loaders) == 1 else loaders
def get100(batch_size, data_root='/tmp/public_dataset/pytorch', train=True, val=True, **kwargs):
    """Return CIFAR-100 loader(s): train, test, or [train, test]."""
    data_root = os.path.expanduser(os.path.join(data_root, 'cifar100-data'))
    num_workers = kwargs.setdefault('num_workers', 1)
    kwargs.pop('input_size', None)
    print("Building CIFAR-100 data loader with {} workers".format(num_workers))

    norm = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    loaders = []
    if train:
        # Training split: pad + random crop + flip augmentation.
        train_pipeline = transforms.Compose([
            transforms.Pad(4),
            transforms.RandomCrop(32),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            norm,
        ])
        loaders.append(torch.utils.data.DataLoader(
            datasets.CIFAR100(root=data_root, train=True, download=True,
                              transform=train_pipeline),
            batch_size=batch_size, shuffle=True, **kwargs))
    if val:
        # Test split: deterministic pipeline, no shuffling.
        loaders.append(torch.utils.data.DataLoader(
            datasets.CIFAR100(root=data_root, train=False, download=True,
                              transform=transforms.Compose([
                                  transforms.ToTensor(), norm])),
            batch_size=batch_size, shuffle=False, **kwargs))
    # Unwrap when only one loader was requested.
    return loaders[0] if len(loaders) == 1 else loaders
def get_transform(data_name, split_name, opt):
    """Return the preprocessing pipeline for one dataset split.

    Args:
        data_name: dataset identifier (unused here; kept for interface
            compatibility with callers).
        split_name: one of 'train', 'val', 'test'.
        opt: options object providing ``crop_size``.
    """
    normalizer = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                      std=[0.229, 0.224, 0.225])
    if split_name == 'train':
        # RandomResizedCrop is the current name of the deprecated
        # RandomSizedCrop alias.
        t_list = [transforms.RandomResizedCrop(opt.crop_size),
                  transforms.RandomHorizontalFlip()]
    elif split_name in ('val', 'test'):
        # Identical deterministic pipeline for val and test
        # (Resize supersedes the deprecated Scale alias).
        t_list = [transforms.Resize(256), transforms.CenterCrop(224)]
    else:
        t_list = []
    t_end = [transforms.ToTensor(), normalizer]
    return transforms.Compose(t_list + t_end)
def __init__(self, num_classes=1000):
    """AlexNet ("one weird trick" variant) with BatchNorm.

    Args:
        num_classes: size of the final classification layer.
    """
    super(AlexNetOWT_BN, self).__init__()
    # Convolutional trunk: conv -> (pool) -> BN/ReLU groups.
    self.features = nn.Sequential(
        nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2, bias=False),
        nn.MaxPool2d(kernel_size=3, stride=2),
        nn.BatchNorm2d(64),
        nn.ReLU(inplace=True),
        nn.Conv2d(64, 192, kernel_size=5, padding=2, bias=False),
        nn.MaxPool2d(kernel_size=3, stride=2),
        nn.ReLU(inplace=True),
        nn.BatchNorm2d(192),
        nn.Conv2d(192, 384, kernel_size=3, padding=1, bias=False),
        nn.ReLU(inplace=True),
        nn.BatchNorm2d(384),
        nn.Conv2d(384, 256, kernel_size=3, padding=1, bias=False),
        nn.ReLU(inplace=True),
        nn.BatchNorm2d(256),
        nn.Conv2d(256, 256, kernel_size=3, padding=1, bias=False),
        nn.MaxPool2d(kernel_size=3, stride=2),
        nn.ReLU(inplace=True),
        nn.BatchNorm2d(256))
    # Fully-connected head with BatchNorm + Dropout.
    self.classifier = nn.Sequential(
        nn.Linear(256 * 6 * 6, 4096, bias=False),
        nn.BatchNorm1d(4096),
        nn.ReLU(inplace=True),
        nn.Dropout(0.5),
        nn.Linear(4096, 4096, bias=False),
        nn.BatchNorm1d(4096),
        nn.ReLU(inplace=True),
        nn.Dropout(0.5),
        nn.Linear(4096, num_classes))
    # Optimiser schedule consumed by the surrounding training harness.
    self.regime = [
        {'epoch': 0, 'optimizer': 'SGD', 'lr': 1e-2,
         'weight_decay': 5e-4, 'momentum': 0.9},
        {'epoch': 10, 'lr': 5e-3},
        {'epoch': 15, 'lr': 1e-3, 'weight_decay': 0},
        {'epoch': 20, 'lr': 5e-4},
        {'epoch': 25, 'lr': 1e-4}]
    # ImageNet channel statistics.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    self.input_transform = {
        'train': transforms.Compose([
            transforms.Scale(256),
            transforms.RandomCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize]),
        'eval': transforms.Compose([
            transforms.Scale(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize])}
def main():
    """Train a ResNet-152 multi-label classifier with SGD.

    NOTE(review): relies on module-level globals (num_classes, ckpt_path,
    split_train_dir, split_val_dir, train) — confirm they are defined.
    """
    training_batch_size = 32
    validation_batch_size = 32
    epoch_num = 100
    iter_freq_print_training_log = 100
    iter_freq_validate = 500
    lr = 1e-2
    weight_decay = 1e-4

    net = models.get_res152(num_classes=num_classes)
    net.train()

    transform = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        RandomVerticalFlip(),
        transforms.ToTensor(),
        # Dataset-specific channel statistics.
        transforms.Normalize([0.311, 0.340, 0.299],
                             [0.167, 0.144, 0.138]),
    ])
    train_set = MultipleClassImageFolder(split_train_dir, transform)
    train_loader = DataLoader(train_set, batch_size=training_batch_size,
                              shuffle=True, num_workers=16)
    val_set = MultipleClassImageFolder(split_val_dir, transform)
    val_loader = DataLoader(val_set, batch_size=validation_batch_size,
                            shuffle=True, num_workers=16)

    criterion = nn.MultiLabelSoftMarginLoss().cuda()
    # Two parameter groups: biases are excluded from weight decay.
    optimizer = optim.SGD([
        {'params': [param for name, param in net.named_parameters()
                    if name[-4:] == 'bias']},
        {'params': [param for name, param in net.named_parameters()
                    if name[-4:] != 'bias'],
         'weight_decay': weight_decay},
    ], lr=lr, momentum=0.9, nesterov=True)

    if not os.path.exists(ckpt_path):
        os.mkdir(ckpt_path)

    info = [1e9, 0, 0]  # [best val loss, epoch, iter]

    for epoch in range(0, epoch_num):
        # Alternate weight decay on/off every other epoch.
        if epoch % 2 == 1:
            optimizer.param_groups[1]['weight_decay'] = 0
            # print() function form: the original Python-2-only print
            # statements are a SyntaxError on Python 3.
            print('weight_decay is set to 0')
        else:
            optimizer.param_groups[1]['weight_decay'] = weight_decay
            print('weight_decay is set to %.4f' % weight_decay)
        train(train_loader, net, criterion, optimizer, epoch,
              iter_freq_print_training_log, iter_freq_validate, val_loader,
              info)
def __init__(self, num_classes=1000):
    """AlexNet ("one weird trick" variant) with BatchNorm.

    Args:
        num_classes: size of the final classification layer.
    """
    super(AlexNetOWT_BN, self).__init__()
    # Convolutional trunk: conv -> (pool) -> BN/ReLU groups.
    self.features = nn.Sequential(
        nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2, bias=False),
        nn.MaxPool2d(kernel_size=3, stride=2),
        nn.BatchNorm2d(64),
        nn.ReLU(inplace=True),
        nn.Conv2d(64, 192, kernel_size=5, padding=2, bias=False),
        nn.MaxPool2d(kernel_size=3, stride=2),
        nn.ReLU(inplace=True),
        nn.BatchNorm2d(192),
        nn.Conv2d(192, 384, kernel_size=3, padding=1, bias=False),
        nn.ReLU(inplace=True),
        nn.BatchNorm2d(384),
        nn.Conv2d(384, 256, kernel_size=3, padding=1, bias=False),
        nn.ReLU(inplace=True),
        nn.BatchNorm2d(256),
        nn.Conv2d(256, 256, kernel_size=3, padding=1, bias=False),
        nn.MaxPool2d(kernel_size=3, stride=2),
        nn.ReLU(inplace=True),
        nn.BatchNorm2d(256))
    # Fully-connected head with BatchNorm + Dropout.
    self.classifier = nn.Sequential(
        nn.Linear(256 * 6 * 6, 4096, bias=False),
        nn.BatchNorm1d(4096),
        nn.ReLU(inplace=True),
        nn.Dropout(0.5),
        nn.Linear(4096, 4096, bias=False),
        nn.BatchNorm1d(4096),
        nn.ReLU(inplace=True),
        nn.Dropout(0.5),
        nn.Linear(4096, num_classes))
    # Optimiser schedule (keyed by starting epoch) consumed by the
    # surrounding training harness.
    self.regime = {
        0: {'optimizer': 'SGD', 'lr': 1e-2,
            'weight_decay': 5e-4, 'momentum': 0.9},
        10: {'lr': 5e-3},
        15: {'lr': 1e-3, 'weight_decay': 0},
        20: {'lr': 5e-4},
        25: {'lr': 1e-4}}
    # ImageNet channel statistics.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    self.input_transform = {
        'train': transforms.Compose([
            transforms.Scale(256),
            transforms.RandomCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize]),
        'eval': transforms.Compose([
            transforms.Scale(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize])}
def get_loaders(args):
    """Return (train, test) DataLoaders for MNIST or CIFAR-10.

    ``args`` fields read: cuda, dataset, batchSz.
    """
    kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}

    if args.dataset == 'mnist':
        mnist_pipeline = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,)),
        ])
        trainLoader = torch.utils.data.DataLoader(
            dset.MNIST('data/mnist', train=True, download=True,
                       transform=mnist_pipeline),
            batch_size=args.batchSz, shuffle=True, **kwargs)
        testLoader = torch.utils.data.DataLoader(
            dset.MNIST('data/mnist', train=False,
                       transform=mnist_pipeline),
            batch_size=args.batchSz, shuffle=False, **kwargs)
    elif args.dataset == 'cifar-10':
        normTransform = transforms.Normalize(
            [0.49139968, 0.48215827, 0.44653124],
            [0.24703233, 0.24348505, 0.26158768])
        trainTransform = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normTransform,
        ])
        testTransform = transforms.Compose([
            transforms.ToTensor(),
            normTransform,
        ])
        trainLoader = DataLoader(
            dset.CIFAR10(root='data/cifar', train=True, download=True,
                         transform=trainTransform),
            batch_size=args.batchSz, shuffle=True, **kwargs)
        testLoader = DataLoader(
            dset.CIFAR10(root='data/cifar', train=False, download=True,
                         transform=testTransform),
            batch_size=args.batchSz, shuffle=False, **kwargs)
    else:
        # Unknown dataset name.
        assert(False)

    return trainLoader, testLoader
def getLoader(datasetName, dataroot, originalSize, imageSize, batchSize=64,
              workers=4, mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
              split='train', shuffle=True, seed=None):
    """Build a DataLoader for one of several dataset back-ends.

    ``datasetName`` selects both the dataset class and the transforms
    module; 'folder' datasets do not accept a ``seed`` argument.
    """
    if datasetName == 'trans':
        from datasets.trans import trans as commonDataset
        import transforms.pix2pix as transforms
    elif datasetName == 'folder':
        from torchvision.datasets.folder import ImageFolder as commonDataset
        import torchvision.transforms as transforms
    elif datasetName == 'pix2pix':
        from datasets.pix2pix import pix2pix as commonDataset
        import transforms.pix2pix as transforms

    # The train split gets random crop + flip augmentation; other splits
    # use a deterministic centre crop. The pipelines are otherwise
    # identical for every back-end.
    if split == 'train':
        pipeline = transforms.Compose([
            transforms.Scale(originalSize),
            transforms.RandomCrop(imageSize),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mean, std),
        ])
    else:
        pipeline = transforms.Compose([
            transforms.Scale(originalSize),
            transforms.CenterCrop(imageSize),
            transforms.ToTensor(),
            transforms.Normalize(mean, std),
        ])

    if datasetName != 'folder':
        dataset = commonDataset(root=dataroot, transform=pipeline, seed=seed)
    else:
        dataset = commonDataset(root=dataroot, transform=pipeline)
    assert dataset
    return torch.utils.data.DataLoader(dataset,
                                       batch_size=batchSize,
                                       shuffle=shuffle,
                                       num_workers=int(workers))