我们从Python开源项目中,提取了以下50个代码示例,用于说明如何使用caffe.set_mode_gpu()。
def __init__(self, net_proto, net_weights, device_id, input_size=None):
    """Load a Caffe TEST-mode net on the given GPU and prepare input preprocessing.

    :param net_proto: path to the deploy prototxt.
    :param net_weights: path to the .caffemodel weights.
    :param device_id: GPU ordinal to bind this process to.
    :param input_size: optional (H, W)-style tail to override the blob shape.
    """
    caffe.set_mode_gpu()
    caffe.set_device(device_id)
    self._net = caffe.Net(net_proto, net_weights, caffe.TEST)

    data_shape = self._net.blobs['data'].data.shape
    if input_size is not None:
        # keep (batch, channels), swap in the caller-requested spatial size
        data_shape = data_shape[:2] + input_size

    xform = caffe.io.Transformer({'data': data_shape})
    if self._net.blobs['data'].data.shape[1] == 3:
        # move image channels to outermost dimension
        xform.set_transpose('data', (2, 0, 1))
        # subtract the dataset-mean value in each channel
        xform.set_mean('data', np.array([104, 117, 123]))
    else:
        pass  # non RGB data need not use transformer
    self._transformer = xform
    self._sample_shape = self._net.blobs['data'].data.shape
def __init__(self, videoThread): threading.Thread.__init__(self) print "Initializing recognition thread..." self.videoThread = videoThread #caffe.set_mode_cpu() caffe.set_mode_gpu() caffe.set_device(0) # Model file and parameters are written by trainDnn.py # Take the most recent parameter set genderPath = "./dcnn_gender" genderParamFiles = glob.glob(genderPath + os.sep + "*.caffemodel") genderParamFiles = sorted(genderParamFiles, key=lambda x:os.path.getctime(x)) MODEL_FILE_GENDER = genderPath + os.sep + "deploy_gender.prototxt" PRETRAINED_GENDER = genderParamFiles[-1] MEAN_FILE_GENDER = genderPath + os.sep + "mean.binaryproto" proto_data = open(MEAN_FILE_GENDER, 'rb').read() a = caffe.io.caffe_pb2.BlobProto.FromString(proto_data) mean = caffe.io.blobproto_to_array(a)[0] # Initialize net self.gender_net = caffe.Classifier(MODEL_FILE_GENDER, PRETRAINED_GENDER, image_dims=(227,227),)
def __init__(self, videoThread): threading.Thread.__init__(self) print "Initializing age recognition thread..." self.videoThread = videoThread #caffe.set_mode_cpu() caffe.set_mode_gpu() # Model file and parameters are written by trainDnn.py # Take the most recent parameter set dcnnPath = "./dcnn_age" paramFiles = glob.glob(dcnnPath + os.sep + "*.caffemodel") paramFiles = sorted(paramFiles, key=lambda x:os.path.getctime(x)) MODEL_FILE = dcnnPath + os.sep + "deploy.prototxt" PRETRAINED = paramFiles[-1] MEAN_FILE = dcnnPath + os.sep + "mean.binaryproto" blob = caffe.proto.caffe_pb2.BlobProto() with open(MEAN_FILE, 'rb') as f: data = f.read() blob.ParseFromString(data) # mean = np.array( caffe.io.blobproto_to_array(blob) ) [0] # Added simple mean mean = np.array([93.5940, 104.7624, 129.1863]) # Initialize net self.net = caffe.Classifier(MODEL_FILE, PRETRAINED, image_dims=(224,224), mean=mean)
def __init__(self, solver_prototxt, output_dir, pretrained_model=None):
    """Initialize the SolverWrapper.

    Creates a GPU SGD solver, optionally warm-starts it from a pretrained
    model, and keeps the parsed SolverParameter around for later use.
    """
    self.output_dir = output_dir
    caffe.set_mode_gpu()
    caffe.set_device(0)

    self.solver = caffe.SGDSolver(solver_prototxt)
    if pretrained_model is not None:
        print ('Loading pretrained model '
               'weights from {:s}').format(pretrained_model)
        self.solver.net.copy_from(pretrained_model)

    # parse the solver prototxt into a SolverParameter message
    self.solver_param = caffe_pb2.SolverParameter()
    with open(solver_prototxt, 'rt') as fh:
        pb2.text_format.Merge(fh.read(), self.solver_param)
def layer_features(layers, model_file, deploy_file, imagemean_file,
                   image_files, gpu=True, gpu_id=0, show_pred=False):
    """extract features from various layers

    Generator yielding (layer_name, blob_data) pairs for each requested
    layer after a forward pass through the fed network.
    """
    if gpu:
        caffe.set_device(gpu_id)
        caffe.set_mode_gpu()
    net = feed_net(model_file, deploy_file, imagemean_file, image_files,
                   show_pred)
    #if type(layers) == str:
    #return net.blobs[layers].data
    for name in layers:
        if name not in net.blobs:
            raise TypeError('Invalid layer name: ' + name)
        yield (name, net.blobs[name].data)
def train_dir(nets, optim, optim2, dataloader, args):
    """Endless training loop: periodically resample input size and validate."""
    global image_size, it, image_sizes
    caffe.set_mode_gpu()
    if args.debug:
        image_sizes = [[416, 416]]
    while True:
        # every 500 iterations pick a new random input resolution
        if it % 500 == 0:
            image_size = image_sizes[random.randint(0, len(image_sizes) - 1)]
            print(image_size)
        #im = cv2.imread('/home/busta/data/90kDICT32px/background/n03085781_3427.jpg')
        #try:
        process_batch(nets, optim, optim2, image_size, args)
        if it % valid_interval == 0:
            validate(nets, dataloader, image_size=[416, 416], split_words=False)
        #except:
        #  continue
def __init__(self, model_def_file, pretrained_model_file, class_labels_file,
             gpu_mode):
    """Load the classifier net and the synset-id -> name label table.

    :param class_labels_file: text file, one "synset_id name[, ...]" per line.
    :param gpu_mode: select GPU or CPU inference.
    """
    logging.info('Loading net and associated files...')
    if gpu_mode:
        caffe.set_mode_gpu()
    else:
        caffe.set_mode_cpu()
    self.net = caffe.Classifier(
        model_def_file, pretrained_model_file,
        image_dims=(400, 400), raw_scale=400,
        mean=np.load('{}/mean.npy'.format(REPO_DIRNAME)).mean(1).mean(1),
        channel_swap=(2, 1, 0)
    )
    with open(class_labels_file) as f:
        labels_df = pd.DataFrame([
            {
                'synset_id': l.strip().split(' ')[0],
                # keep only the first comma-separated synonym as the name
                'name': ' '.join(l.strip().split(' ')[1:]).split(',')[0]
            }
            for l in f.readlines()
        ])
    # FIX: DataFrame.sort() was deprecated in pandas 0.17 and removed in
    # 0.20; sort_values() is the supported equivalent.
    self.labels = labels_df.sort_values('synset_id')['name'].values
def __init__(self, solver, output_dir, pretrained_model=None, gpu_id=0,
             data=None):
    """Initialize the SolverWrapper on the requested GPU.

    Also hands `data` to the net's first (python data) layer.
    """
    self.output_dir = output_dir
    caffe.set_mode_gpu()
    caffe.set_device(gpu_id)

    self.solver = caffe.SGDSolver(solver)
    if pretrained_model is not None:
        print(('Loading pretrained model '
               'weights from {:s}').format(pretrained_model))
        self.solver.net.copy_from(pretrained_model)

    # parse the solver definition for later reference
    self.solver_param = caffe_pb2.SolverParameter()
    with open(solver, 'rt') as fh:
        pb2.text_format.Merge(fh.read(), self.solver_param)

    # seed the python data layer with the in-memory dataset
    self.solver.net.layers[0].set_data(data)
def __init__(self, use_gpu=True, model=None):
    '''
    Init net.
    :param use_gpu: select GPU device 0 or CPU mode.
    :param model: Network definition (prototxt path); required.
    :raises ValueError: if no model is given.
    '''
    # FIX: raising a bare string (`raise("...")`) is not a valid exception;
    # use a real exception type. Also avoid the mutable default `model=[]`.
    if not model:
        raise ValueError("model should not be empty!")
    print("Init NetTester: Use gpu: {}".format(use_gpu))
    print("Network: {}".format(model))
    if use_gpu:
        caffe.set_device(0)
        caffe.set_mode_gpu()
    else:
        caffe.set_mode_cpu()
    self.__net = caffe.Net(model, caffe.TRAIN)
def __init__(self, minsize=20, threshold=None, factor=0.709,
             fastresize=False, gpuid=0):
    """Load the three cascaded MTCNN detection nets (P/R/O) on one GPU.

    :param minsize: minimum face size in pixels.
    :param threshold: per-stage score thresholds; defaults to [0.6, 0.7, 0.7].
    :param factor: image-pyramid scale factor.
    :param fastresize: enable the fast resize code path.
    :param gpuid: GPU ordinal to use.
    """
    # FIX: avoid the mutable default argument `threshold=[0.6, 0.7, 0.7]`
    # (a shared list would leak state across instances if ever mutated).
    if threshold is None:
        threshold = [0.6, 0.7, 0.7]
    self.minsize = minsize
    self.threshold = threshold
    self.factor = factor
    self.fastresize = fastresize

    model_P = './model/det1.prototxt'
    weights_P = './model/det1.caffemodel'
    model_R = './model/det2.prototxt'
    weights_R = './model/det2.caffemodel'
    model_O = './model/det3.prototxt'
    weights_O = './model/det3.caffemodel'

    caffe.set_mode_gpu()
    caffe.set_device(gpuid)
    self.PNet = caffe.Net(model_P, weights_P, caffe.TEST)
    self.RNet = caffe.Net(model_R, weights_R, caffe.TEST)
    self.ONet = caffe.Net(model_O, weights_O, caffe.TEST)
def gen_net():
    """Build the deploy net on GPU 1, feed one preprocessed image, forward."""
    caffe.set_device(1)
    caffe.set_mode_gpu()
    filename = '2007_000032.jpg'
    img = np.asarray(Image.open(filename), dtype=np.float32)
    img = img[:, :, ::-1]            # reverse channel order (RGB -> BGR)
    img -= np.array((104.00698793, 116.66876762, 122.67891434))
    img = img.transpose((2, 0, 1))   # HWC -> CHW
    net = caffe.Net(
        "deploy.prototxt",
        #"train_iter_" + str(num) + ".caffemodel",
        #"/data/VGG16/caffemodel",
        "good.caffemodel",
        caffe.TRAIN)
    net.blobs["data"].reshape(1, *img.shape)
    net.blobs["data"].data[...] = img
    net.forward()
    return net
def gen_net(num):
    """Load train_val net with snapshot `num`, push one preprocessed image.

    :param num: training-iteration number used to pick the .caffemodel file.
    """
    caffe.set_device(0)
    caffe.set_mode_gpu()
    filename = '2007_000032.jpg'
    img = np.asarray(Image.open(filename), dtype=np.float32)
    img = img[:, :, ::-1]            # reverse channel order (RGB -> BGR)
    img -= np.array((104.00698793, 116.66876762, 122.67891434))
    img = img.transpose((2, 0, 1))   # HWC -> CHW
    net = caffe.Net(
        "train_val.prototxt",
        "train_iter_" + str(num) + ".caffemodel",
        # "/data/VGG16/caffemodel",
        # "../fcn-32s/good.caffemodel",
        caffe.TRAIN)
    net.blobs["data"].reshape(1, *img.shape)
    net.blobs["data"].data[...] = img
    net.forward()
    return net
def load_nets(args, cur_gpu): # initialize solver and feature net, # RNN should be initialized before CNN, because CNN cudnn conv layers # may assume using all available memory caffe.set_mode_gpu() caffe.set_device(cur_gpu) solver = caffe.SGDSolver(args.solver) if args.snapshot: print "Restoring history from {}".format(args.snapshot) solver.restore(args.snapshot) net = solver.net if args.weights: print "Copying weights from {}".format(args.weights) net.copy_from(args.weights) return solver, net
def load_nets(args, cur_gpu): # initialize solver and feature net, # RNN should be initialized before CNN, because CNN cudnn conv layers # may assume using all available memory caffe.set_mode_gpu() caffe.set_device(cur_gpu) solver = caffe.SGDSolver(args.solver) if args.snapshot: print "Restoring history from {}".format(args.snapshot) solver.restore(args.snapshot) rnn = solver.net if args.weights: rnn.copy_from(args.weights) feature_net = caffe.Net(args.feature_net, args.feature_param, caffe.TEST) # apply bbox regression normalization on the net weights with open(args.bbox_mean, 'rb') as f: bbox_means = cPickle.load(f) with open(args.bbox_std, 'rb') as f: bbox_stds = cPickle.load(f) feature_net.params['bbox_pred_vid'][0].data[...] = \ feature_net.params['bbox_pred_vid'][0].data * bbox_stds[:, np.newaxis] feature_net.params['bbox_pred_vid'][1].data[...] = \ feature_net.params['bbox_pred_vid'][1].data * bbox_stds + bbox_means return solver, feature_net, rnn, bbox_means, bbox_stds
def load_models(args): # load rnn model caffe.set_mode_gpu() if args.gpus is None: caffe.set_device(args.job_id - 1) else: assert args.job_id <= len(args.gpus) caffe.set_device(args.gpus[args.job_id-1]) if args.lstm_param is not '': rnn_net = caffe.Net(args.lstm_def, args.lstm_param, caffe.TEST) print 'Loaded RNN network from {:s}.'.format(args.lstm_def) else: rnn_net = caffe.Net(args.lstm_def, caffe.TEST) print 'WARNING: dummy RNN network created.' # load feature model feature_net = caffe.Net(args.def_file, args.param, caffe.TEST) print 'Loaded feature network from {:s}.'.format(args.def_file) return feature_net, rnn_net
def __init__(self,params): self.dimension = params['dimension'] self.dataset = params['dataset'] self.pooling = params['pooling'] # Read image lists with open(params['query_list'],'r') as f: self.query_names = f.read().splitlines() with open(params['frame_list'],'r') as f: self.database_list = f.read().splitlines() # Parameters needed self.layer = params['layer'] self.save_db_feats = params['database_feats'] # Init network if params['gpu']: caffe.set_mode_gpu() caffe.set_device(0) else: caffe.set_mode_cpu() print "Extracting from:", params['net_proto'] cfg.TEST.HAS_RPN = True self.net = caffe.Net(params['net_proto'], params['net'], caffe.TEST)
def __init__(self, model_file, pretrained_file, mean_value=None, layer=['pool5'], input_size = None ): caffe.set_mode_gpu() caffe.Net.__init__(self, model_file, pretrained_file, caffe.TEST) # get name input layer self.list_layers = layer self.mean_value = mean_value # set transformer object self.transformer = caffe.io.Transformer({'data': self.blobs['data'].data.shape}) self.transformer.set_transpose( 'data', (2,0,1) ) if mean_value is not None: self.transformer.set_mean('data', mean_value) self.transformer.set_raw_scale('data', 255) self.transformer.set_channel_swap('data', (2,1,0)) if input_size is not None: #reshape the input print "New input! {}".format(input_size) self.reshape_input( input_size[0], input_size[1], input_size[2], input_size[3] )
def solve(proto, snapshot, gpus, timing, uid, rank):
    """Per-process multi-GPU solver loop using NCCL for gradient exchange."""
    caffe.set_mode_gpu()
    caffe.set_device(gpus[rank])
    caffe.set_solver_count(len(gpus))
    caffe.set_solver_rank(rank)
    caffe.set_multiprocess(True)

    solver = caffe.SGDSolver(proto)
    if snapshot and len(snapshot) != 0:
        solver.restore(snapshot)

    nccl = caffe.NCCL(solver, uid)
    nccl.bcast()
    if timing and rank == 0:
        # rank 0 optionally runs the per-layer timing harness instead
        time(solver, nccl)
    else:
        solver.add_callback(nccl)
        if solver.param.layer_wise_reduce:
            solver.net.after_backward(nccl)
        solver.step(solver.param.max_iter)
def __init__(self, hyperparams, dO, dU):
    """Caffe-backed policy optimizer: merge hyperparams, pick a device,
    build the solver, and wrap the test nets in a CaffePolicy."""
    config = copy.deepcopy(POLICY_OPT_CAFFE)
    config.update(hyperparams)
    PolicyOpt.__init__(self, config, dO, dU)

    self.batch_size = self._hyperparams['batch_size']
    if self._hyperparams['use_gpu']:
        caffe.set_device(self._hyperparams['gpu_id'])
        caffe.set_mode_gpu()
    else:
        caffe.set_mode_cpu()

    self.init_solver()
    self.caffe_iter = 0
    # isotropic initial exploration variance over the action dimensions
    self.var = self._hyperparams['init_var'] * np.ones(dU)
    self.policy = CaffePolicy(self.solver.test_nets[0],
                              self.solver.test_nets[1], self.var)
def run(self):
    """Poll the video thread for face crops and publish gender probabilities."""
    caffe.set_mode_gpu()
    while self.videoThread.isTerminated() == False:
        while self.videoThread.isTerminated() == False and self.videoThread.getEventReady() == True:
            time.sleep(0.1)
            print("Gender recognition sleep")
        #print "Detecting..."
        crop = None
        # FIX: test for "no crop yet" with `is None`. `crop == None` on a
        # numpy array performs an elementwise comparison, which does not
        # mean "crop is missing" (and raises on truth-testing in modern
        # numpy).
        while crop is None:
            crop, rectangle = self.videoThread.getCropEx(1)
            time.sleep(0.05)
            if crop is None:
                # No crops available yet
                time.sleep(0.1)
        crop = crop.astype(np.float32)
        propabilities = self.gender_net.predict([crop], oversample=False).ravel()  # [Male, Female]
        self.videoThread.setGender(propabilities)
def run(self):
    """Poll the video thread for face crops and publish the expected age."""
    caffe.set_mode_gpu()
    while self.videoThread.isTerminated() == False:
        while self.videoThread.isTerminated() == False and self.videoThread.getEventReady() == True:
            time.sleep(0.1)
            print("Age recognition sleep")
        #print "Detecting..."
        crop = None
        # FIX: test for "no crop yet" with `is None`. `crop == None` on a
        # numpy array performs an elementwise comparison, which does not
        # mean "crop is missing" (and raises on truth-testing in modern
        # numpy).
        while crop is None:
            crop, rectangle = self.videoThread.getCropEx(0)
            time.sleep(0.05)
            if crop is None:
                # No crops available yet
                time.sleep(0.1)
        crop = crop.astype(np.float32)
        out = self.net.predict([crop], oversample=False).ravel()
        # expected age = probability-weighted sum over the 0..100 bins
        age = np.dot(out, range(101))
        self.videoThread.setAge(age)
def get_net(caffemodel, deploy_file, use_gpu=True):
    """
    Returns an instance of caffe.Net

    Arguments:
    caffemodel -- path to a .caffemodel file
    deploy_file -- path to a .prototxt file

    Keyword arguments:
    use_gpu -- if True, use the GPU for inference
    """
    # FIX: honor the documented use_gpu flag. The previous code had the GPU
    # branch commented out and unconditionally forced CPU mode, contradicting
    # both this docstring and the sibling get_net variant.
    if use_gpu:
        caffe.set_mode_gpu()
    else:
        caffe.set_mode_cpu()

    # load a new model
    return caffe.Net(deploy_file, caffemodel, caffe.TEST)


# Transformer function to perform image transformation
def get_net(caffemodel, deploy_file, use_gpu=True):
    """
    Returns an instance of caffe.Net

    Arguments:
    caffemodel -- path to a .caffemodel file
    deploy_file -- path to a .prototxt file

    Keyword arguments:
    use_gpu -- if True, use the GPU for inference
    """
    if use_gpu:
        caffe.set_mode_gpu()
    else:
        # FIX: make CPU mode explicit; caffe's mode is process-global, so
        # without this branch a previously selected GPU mode would leak in.
        caffe.set_mode_cpu()

    # load a new model
    return caffe.Net(deploy_file, caffemodel, caffe.TEST)
def get_aligner(caffe_model_path, use_more_stage=False):
    """Load the MTCNN alignment stages: optional RNet, plus ONet and LNet."""
    caffe.set_mode_gpu()
    # PNet = caffe.Net(caffe_model_path + "/det1.prototxt",
    #                  caffe_model_path + "/det1.caffemodel", caffe.TEST)
    if use_more_stage:
        RNet = caffe.Net(caffe_model_path + "/det2.prototxt",
                         caffe_model_path + "/det2.caffemodel", caffe.TEST)
    else:
        RNet = None
    ONet = caffe.Net(caffe_model_path + "/det3.prototxt",
                     caffe_model_path + "/det3.caffemodel", caffe.TEST)
    LNet = caffe.Net(caffe_model_path + "/det4.prototxt",
                     caffe_model_path + "/det4.caffemodel", caffe.TEST)
    # return (PNet, RNet, ONet)
    return (RNet, ONet, LNet)
    # return (RNet, ONet, None)
def main(args):
    """Convert the VGG-16 fully connected layers to convolutional layers."""
    caffe.set_mode_gpu()
    # (source fc layer, destination conv layer) parameter pairs to transplant
    param_pairs = [('fc6', 'fc6-conv'),
                   ('fc7', 'fc7-conv'),
                   ('fc8', 'fc8-conv')]
    make_fully_conv(
        '/home/pierre/tmpModels/VGG/VGG_ILSVRC_16_layers_deploy.prototxt',
        '/home/pierre/tmpModels/VGG/VGG_ILSVRC_16_layers.caffemodel',
        '/home/pierre/tmpModels/VGG/VGG_ILSVRC_16_layers_fcn_deploy.prototxt',
        param_pairs,
        '/home/pierre/tmpModels/VGG/VGG_ILSVRC_16_layers_conv.caffemodel',
    )
    return 0
def main(argv):
    """Run FCN inference and overlay generation for one sport's lists."""
    sport = 'long_jump'
    model = 'snap_iter_50000.caffemodel'
    #---
    weights = model_root + 'fcn/' + sport + '/' + model
    netf = './fcn/' + sport + '/deploy.prototxt'
    gpu = 0
    caffe.set_device(gpu)
    caffe.set_mode_gpu()
    net = caffe.Net(netf, weights, caffe.TEST)
    im_head = '/export/home/mfrank/data/OlympicSports/clips/'
    # NOTE(review): the assignment above is immediately overwritten below.
    im_head = '/export/home/mfrank/data/OlympicSports/patches/'
    test_path_file = 'fcn/' + sport + '/test.txt'
    train_path_file = 'fcn/' + sport + '/train.txt'
    inferfile(net, train_path_file, im_head)
    ifp_morris.apply_overlayfcn(train_path_file, factor=4)
    inferfile(net, test_path_file, im_head)
    ifp_morris.apply_overlayfcn(test_path_file, factor=4)
def __init__(self, args):
    """Feature-extraction app: resolve VGG model paths, pick the caffe
    device, and (unless disabled) build the VGG feature extractor."""
    super().__init__(args, with_video_output=False)
    if self.vgg_model_path is None:
        self.vgg_model_path = "/media/" + getpass.getuser() + "/Data/AMBR_data/ml"
    self.vgg_model_filename = os.path.join(self.vgg_model_path,
                                           self.vgg_model_filename)
    self.vgg_pretrained_filename = os.path.join(self.vgg_model_path,
                                                self.vgg_pretrained_filename)
    if self.output_datafile is None:
        self.output_datafile = "{:s}_features.npz".format(self.in_video[:-4])
    self.prev_frame_centroid = None
    if self.caffe_cpu:
        caffe.set_mode_cpu()
    else:
        caffe.set_mode_gpu()
    self.extractor = None
    self.blank_features = None
    if not self.no_vgg:
        self.extractor = VGGFeatureExtractor(
            model_file=self.vgg_model_filename,
            pretrained_file=self.vgg_pretrained_filename)
        # fc7 response to an all-black frame, used as the "nothing present"
        # feature vector
        self.blank_features = self.extractor.extract_single(
            np.zeros((256, 256, 3), dtype=np.uint8), blobs=['fc7'])['fc7']
    self.features = []
    self.present_flags = []
def get_predictions(region_crops):
    """Run the text/no-text classifier over the given image crops.

    :param region_crops: list of images accepted by caffe.Classifier.predict.
    :returns: per-crop class probability array.
    """
    # FIX: os.environ values are strings, so ANY non-empty value -- including
    # "0" or "false" -- was truthy, and an unset variable raised KeyError.
    # Parse the flag explicitly and default to CPU.
    if os.environ.get("IS_GPU", "").strip().lower() in ("1", "true", "yes"):
        caffe.set_device(0)
        caffe.set_mode_gpu()
    else:
        caffe.set_mode_cpu()
    classifier = caffe.Classifier(
        os.path.join(os.environ["TEXT_NOTEXT_MODELS_DIR"], "deploy.prototxt"),
        os.path.join(os.environ["TEXT_NOTEXT_MODELS_DIR"], "weights.caffemodel"),
        mean=np.array([104, 117, 123], dtype='f4'),
        image_dims=[224, 224],
        raw_scale=255.0,
        channel_swap=[2, 1, 0])
    LOGGER.info("Classifying " + str(len(region_crops)) + " inputs.")
    predictions = classifier.predict(region_crops)
    return predictions
def run(self, _, app_context):
    """run the action"""
    import caffe

    # init CPU/GPU mode from configuration
    if app_context.get_config('caffe.cpu_mode'):
        caffe.set_mode_cpu()
    else:
        caffe.set_mode_gpu()
        caffe.set_device(0)

    # load test model and publish it on the shared app context
    test_model_file = "models/" + app_context.get_config('caffe.test_model')
    trained_data_file = "cache/data/" + app_context.get_config('caffe.trained_data')
    test_net = caffe.Net(test_model_file, trained_data_file, caffe.TEST)
    app_context.params['test_net'] = test_net
    logging.getLogger(__name__).info('Loaded neural network: ' + trained_data_file)
def _loadModel(self, model_dirs, id): print 'loading model...from{}'.format(model_dirs) model_file = osp.join(model_dirs, 'vgg16.prototxt') model_weights = osp.join(model_dirs, 'vgg16.caffemodel') mean_file = osp.join(model_dirs, 'vgg16_mean.npy') if id == -1: caffe.set_mode_cpu() else: caffe.set_mode_gpu() caffe.set_device(id) net = caffe.Net(model_file, model_weights, caffe.TEST) transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape}) transformer.set_mean('data', np.load(mean_file).mean(1).mean(1)) transformer.set_channel_swap('data', (2, 1, 0)) transformer.set_transpose('data', (2, 0, 1)) #transformer.set_raw_scale('data', 255) self.net = net self.transformer = transformer self.style_layers = VGG16_STYLES self.content_layers = VGG16_CONTENTS self.layers = VGG16_LAYERS print 'model loading done'
def get_caffe_model(caffe_dir, caffe_model, gpu=True, image_dims=(256, 256),
                    mean_file='default', raw_scale=255.0,
                    channel_swap=(2, 1, 0), input_scale=None):
    """Build a caffe.Classifier for a model living under caffe_dir/models.

    mean_file='default' resolves to the bundled ilsvrc_2012_mean.npy.
    """
    if mean_file == 'default':
        mean_file = os.path.join(caffe_dir, 'python', 'caffe', 'imagenet',
                                 'ilsvrc_2012_mean.npy')
    model_path = os.path.join(caffe_dir, 'models', caffe_model,
                              '%s.caffemodel' % caffe_model)
    model_def = os.path.join(caffe_dir, 'models', caffe_model,
                             'deploy.prototxt')
    print('Loading mean file %s' % mean_file)
    mean = np.load(mean_file).mean(1).mean(1)
    if gpu:
        caffe.set_mode_gpu()
    else:
        caffe.set_mode_cpu()
    return caffe.Classifier(model_def, model_path, image_dims=image_dims,
                            mean=mean, input_scale=input_scale,
                            raw_scale=raw_scale, channel_swap=channel_swap)
def __init__(self, model_path, deploy_path, mean, crop = 227, layer = 'fc7'): self.net = caffe.Net(deploy_path, model_path, caffe.TEST) self.transformer = caffe.io.Transformer({'data': self.net.blobs['data'].data.shape}) self.transformer.set_mean('data', mean) self.transformer.set_transpose('data', (2, 0, 1)) self.transformer.set_channel_swap('data', (2, 1, 0)) self.transformer.set_raw_scale('data', 255.0) self.crop = crop self.image = [] self.layer = layer caffe.set_mode_gpu() print "Mean:", mean
def __init__(self, deploy, pretrained, mean, labels, gpu=False):
    # Set up the classification net (Classifier code path) and label table.
    if gpu:
        caffe.set_mode_gpu()
    else:
        caffe.set_mode_cpu()  # in windows, only CPU mode supported
    self.__labels = self.load_labels(labels);
    mean_ar = self.convert(mean)
    # NOTE(review): `if True` makes the else branch below unreachable; only
    # the Classifier path ever runs. The else branch looks like a kept-around
    # manual caffe.Net + Transformer alternative -- confirm before deleting.
    if True:
        self.__net = caffe.Classifier(deploy, pretrained,
                                      mean = mean_ar.mean(1).mean(1),
                                      channel_swap = (2, 1, 0),
                                      raw_scale = 255,
                                      image_dims = (256, 256))
    else:
        self.__net = caffe.Net(deploy, pretrained, caffe.TEST)
        print self.__net.blobs['data'].data.shape
        self.__transformer = caffe.io.Transformer({'data': self.__net.blobs['data'].data.shape})
        self.__transformer.set_transpose('data', (2,0,1)) # height*width*channel -> channel*height*width
        self.__transformer.set_mean('data', mean_ar)
        self.__transformer.set_raw_scale('data', 255)
        self.__transformer.set_channel_swap('data', (2,1,0)) # RGB -> BGR
def __init__(self):
    """Load GoogLeNet as a backprop-capable Classifier (deep-dream style)."""
    caffe.set_mode_gpu()
    #caffe.set_device(0)
    model_path = '../models/bvlc_googlenet/'  # substitute your path here
    net_fn = model_path + 'deploy.prototxt'
    param_fn = model_path + 'bvlc_googlenet.caffemodel'

    model = caffe.io.caffe_pb2.NetParameter()
    # FIX: use context managers; the original leaked both file handles
    # (open(...).read() and open(...).write()).
    with open(net_fn) as f:
        text_format.Merge(f.read(), model)
    model.force_backward = True  # backward to input layer
    with open('tmp.prototxt', 'w') as f:
        f.write(str(model))

    self.net = caffe.Classifier('tmp.prototxt', param_fn,
                                mean=np.float32([104.0, 116.0, 122.0]),
                                channel_swap=(2, 1, 0))
    # for the mode guide, if flag = 1
    self.flag = 0
    self.epoch = 20
    self.end = 'inception_4c/output'
    #self.end = 'conv4'
def __init__(self, solver_prototxt, pretrained_model=None): """Initialize the SolverWrapper.""" self.solver = caffe.SGDSolver(solver_prototxt) if pretrained_model is not None: print ('Loading pretrained model ' 'weights from {:s}').format(pretrained_model) self.solver.net.copy_from(pretrained_model) self.solver_param = caffe.io.caffe_pb2.SolverParameter() with open(solver_prototxt, 'rt') as f: text_format.Merge(f.read(), self.solver_param) if self.solver_param.solver_mode == 1: caffe.set_mode_gpu() caffe.set_device(params.gpu_id) print 'Use GPU', params.gpu_id, 'to train' else: print 'Use CPU to train' #initial python data layer self.solver.net.layers[0].set_db()
def init_detection_net(self, gpu_id=0, prototxt=None, caffemodel=None):
    """init extraction network

    Resolves default ZF Faster R-CNN model paths when none are given and
    loads the detection net on the requested GPU.
    """
    cfg.TEST.HAS_RPN = True  # Use RPN for proposals
    if prototxt is None:
        prototxt = os.path.join(cfg.ROOT_DIR, 'models', NETS['zf'][0],
                                'faster_rcnn_alt_opt', 'faster_rcnn_test.pt')
    if caffemodel is None:
        caffemodel = os.path.join(cfg.ROOT_DIR, 'output/default/train',
                                  NETS['zf'][1])
    if not os.path.isfile(caffemodel):
        raise IOError(('{:s} not found.\nDid you run ./data/script/'
                       'fetch_faster_rcnn_models.sh?').format(caffemodel))
    #np.random.seed(cfg.RNG_SEED)
    caffe.set_random_seed(cfg.RNG_SEED)
    caffe.set_mode_gpu()
    caffe.set_device(gpu_id)
    self.net_d = caffe.Net(prototxt, caffemodel, caffe.TEST)
def __init__(self, solver_prototxt, pretrained_model=None): """Initialize the SolverWrapper.""" self.solver = caffe.SGDSolver(solver_prototxt) if pretrained_model is not None: print ('Loading pretrained model ' 'weights from {:s}').format(pretrained_model) self.solver.net.copy_from(pretrained_model) self.solver_param = caffe.io.caffe_pb2.SolverParameter() with open(solver_prototxt, 'rt') as f: text_format.Merge(f.read(), self.solver_param) if self.solver_param.solver_mode == 1: caffe.set_mode_gpu() caffe.set_device(params.gpu_id) print 'Use GPU', params.gpu_id, 'to train' else: print 'Use CPU to train' #initial python data layer #self.solver.net.layers[0].set_db()
def solve(proto, gpus, uid, rank, max_iter):
    """Multi-GPU NCCL training worker: rank 0 seeds the weights, every rank
    steps the solver max_iter times one iteration at a time."""
    caffe.set_mode_gpu()
    caffe.set_device(gpus[rank])
    caffe.set_solver_count(len(gpus))
    caffe.set_solver_rank(rank)
    caffe.set_multiprocess(True)

    solver = caffe.SGDSolver(proto)
    if rank == 0:
        # solver.restore(_snapshot)
        solver.net.copy_from(_weights)
    # tell the (python) data layer which GPU this worker owns
    solver.net.layers[0].get_gpu_id(gpus[rank])

    nccl = caffe.NCCL(solver, uid)
    nccl.bcast()
    solver.add_callback(nccl)
    if solver.param.layer_wise_reduce:
        solver.net.after_backward(nccl)
    for _ in range(max_iter):
        solver.step(1)
def test_imdb_wiki_model():
    """Load the IMDB-WIKI age model on the GPU (work in progress)."""
    # not finished
    sample_lst_fn = 'datasets/IMDB-WIKI/Annotations/imdb_wiki_good_test.json'
    img_root = 'datasets/IMDB-WIKI/Images'
    batch_size = 128
    num_batch = 10
    gpu_id = 0
    fn_model = 'datasets/IMDB-WIKI/caffe_models/age.prototxt'
    fn_weight = 'datasets/IMDB-WIKI/caffe_models/dex_imdb_wiki.caffemodel'
    # NOTE(review): nested like [[[B, G, R]]] -- presumably for broadcasting
    # over an image; confirm against the (unfinished) evaluation code.
    imagenet_mean = [[[104, 117, 123]]]
    caffe.set_device(gpu_id)
    caffe.set_mode_gpu()
    model = caffemodel(fn_model, fn_weight, caffe.TEST)
def _init_caffe(cfg):
    """Initialize pycaffe in a training process.

    Seeds numpy and caffe RNGs from cfg.RNG_SEED for reproducibility and
    binds the process to cfg.GPU_ID in GPU mode.
    """
    import caffe

    # fix the random seeds (numpy and caffe) for reproducibility
    np.random.seed(cfg.RNG_SEED)
    caffe.set_random_seed(cfg.RNG_SEED)

    # set up caffe
    caffe.set_mode_gpu()
    caffe.set_device(cfg.GPU_ID)
def setup(): global resnet_mean global resnet_net global vqa_net # data provider vqa_data_provider_layer.CURRENT_DATA_SHAPE = EXTRACT_LAYER_SIZE # mean substraction blob = caffe.proto.caffe_pb2.BlobProto() data = open( RESNET_MEAN_PATH , 'rb').read() blob.ParseFromString(data) resnet_mean = np.array( caffe.io.blobproto_to_array(blob)).astype(np.float32).reshape(3,224,224) resnet_mean = np.transpose(cv2.resize(np.transpose(resnet_mean,(1,2,0)), (448,448)),(2,0,1)) # resnet caffe.set_device(GPU_ID) caffe.set_mode_gpu() resnet_net = caffe.Net(RESNET_LARGE_PROTOTXT_PATH, RESNET_CAFFEMODEL_PATH, caffe.TEST) # our net vqa_net = caffe.Net(VQA_PROTOTXT_PATH, VQA_CAFFEMODEL_PATH, caffe.TEST) # uploads if not os.path.exists(UPLOAD_FOLDER): os.makedirs(UPLOAD_FOLDER) if not os.path.exists(VIZ_FOLDER): os.makedirs(VIZ_FOLDER) print 'Finished setup'