我们从Python开源项目中,提取了以下50个代码示例,用于说明如何使用caffe.set_device()。
def __init__(self, net_proto, net_weights, device_id, input_size=None):
    """Load a Caffe net on the given GPU and build its input transformer.

    :param net_proto: deploy prototxt path.
    :param net_weights: caffemodel path.
    :param device_id: GPU ordinal passed to caffe.set_device.
    :param input_size: optional (H, W) override appended to the blob's
        (batch, channel) dims when building the transformer.
    """
    caffe.set_mode_gpu()
    caffe.set_device(device_id)
    self._net = caffe.Net(net_proto, net_weights, caffe.TEST)
    input_shape = self._net.blobs['data'].data.shape
    if input_size is not None:
        input_shape = input_shape[:2] + input_size
    transformer = caffe.io.Transformer({'data': input_shape})
    if self._net.blobs['data'].data.shape[1] == 3:
        # move image channels to outermost dimension
        transformer.set_transpose('data', (2, 0, 1))
        # subtract the dataset-mean value in each channel
        transformer.set_mean('data', np.array([104, 117, 123]))
    else:
        # non RGB data need not use transformer
        pass
    self._transformer = transformer
    self._sample_shape = self._net.blobs['data'].data.shape
def __init__(self, videoThread): threading.Thread.__init__(self) print "Initializing recognition thread..." self.videoThread = videoThread #caffe.set_mode_cpu() caffe.set_mode_gpu() caffe.set_device(0) # Model file and parameters are written by trainDnn.py # Take the most recent parameter set genderPath = "./dcnn_gender" genderParamFiles = glob.glob(genderPath + os.sep + "*.caffemodel") genderParamFiles = sorted(genderParamFiles, key=lambda x:os.path.getctime(x)) MODEL_FILE_GENDER = genderPath + os.sep + "deploy_gender.prototxt" PRETRAINED_GENDER = genderParamFiles[-1] MEAN_FILE_GENDER = genderPath + os.sep + "mean.binaryproto" proto_data = open(MEAN_FILE_GENDER, 'rb').read() a = caffe.io.caffe_pb2.BlobProto.FromString(proto_data) mean = caffe.io.blobproto_to_array(a)[0] # Initialize net self.gender_net = caffe.Classifier(MODEL_FILE_GENDER, PRETRAINED_GENDER, image_dims=(227,227),)
def __init__(self, solver_prototxt, output_dir, pretrained_model=None):
    """Initialize the SolverWrapper.

    Builds an SGD solver on GPU 0, optionally copies pretrained weights,
    and parses the solver prototxt into a SolverParameter message.
    """
    self.output_dir = output_dir
    caffe.set_mode_gpu()
    caffe.set_device(0)
    self.solver = caffe.SGDSolver(solver_prototxt)
    if pretrained_model is not None:
        print ('Loading pretrained model '
               'weights from {:s}').format(pretrained_model)
        self.solver.net.copy_from(pretrained_model)
    self.solver_param = caffe_pb2.SolverParameter()
    with open(solver_prototxt, 'rt') as f:
        pb2.text_format.Merge(f.read(), self.solver_param)
def layer_features(layers, model_file, deploy_file, imagemean_file, image_files,
                   gpu=True, gpu_id=0, show_pred=False):
    """extract features from various layers

    Generator yielding (layer_name, blob_data) pairs for each requested
    layer after a forward pass through the fed network.
    """
    if gpu:
        caffe.set_device(gpu_id)
        caffe.set_mode_gpu()
    net = feed_net(model_file, deploy_file, imagemean_file, image_files, show_pred)
    for name in layers:
        if name not in net.blobs:
            raise TypeError('Invalid layer name: ' + name)
        yield (name, net.blobs[name].data)
def __init__(self, solver, output_dir, pretrained_model=None, gpu_id=0, data=None):
    """Initialize the SolverWrapper.

    Like the plain wrapper, but selects the GPU via gpu_id and hands
    `data` to the net's first (python data) layer.
    """
    self.output_dir = output_dir
    caffe.set_mode_gpu()
    caffe.set_device(gpu_id)
    self.solver = caffe.SGDSolver(solver)
    if pretrained_model is not None:
        print(('Loading pretrained model '
               'weights from {:s}').format(pretrained_model))
        self.solver.net.copy_from(pretrained_model)
    self.solver_param = caffe_pb2.SolverParameter()
    with open(solver, 'rt') as f:
        pb2.text_format.Merge(f.read(), self.solver_param)
    self.solver.net.layers[0].set_data(data)
def __init__(self, use_gpu=True, model=None):
    """
    Init net.

    :param use_gpu: run Caffe on the GPU when True, otherwise on the CPU.
    :param model: network definition (prototxt path). Required.
    :raises ValueError: if no model is given.
    """
    # FIX: the original used `raise("...")`, which raises TypeError because
    # strings are not exceptions, and a mutable default argument ([]).
    # `model=None` with a falsiness check is backward compatible: callers
    # passing [] (or nothing) still hit the error path.
    if not model:
        raise ValueError("model should not be empty!")
    # FIX: `print("...{}").format(x)` only formatted correctly as a Python 2
    # print statement; format first so the output is identical on Py2/Py3.
    print("Init NetTester: Use gpu: {}".format(use_gpu))
    print("Network: {}".format(model))
    if use_gpu:
        caffe.set_device(0)
        caffe.set_mode_gpu()
    else:
        caffe.set_mode_cpu()
    self.__net = caffe.Net(model, caffe.TRAIN)
def __init__(self, minsize=20, threshold=[0.6, 0.7, 0.7], factor=0.709,
             fastresize=False, gpuid=0):
    """Build the three-stage MTCNN detector (P-Net, R-Net, O-Net) on a GPU.

    :param minsize: minimum face size to detect.
    :param threshold: per-stage score thresholds.
    :param factor: image pyramid scale factor.
    :param fastresize: enable the fast-resize code path.
    :param gpuid: GPU ordinal for Caffe.
    """
    self.minsize = minsize
    self.threshold = threshold
    self.factor = factor
    self.fastresize = fastresize
    stage_files = {
        'P': ('./model/det1.prototxt', './model/det1.caffemodel'),
        'R': ('./model/det2.prototxt', './model/det2.caffemodel'),
        'O': ('./model/det3.prototxt', './model/det3.caffemodel'),
    }
    caffe.set_mode_gpu()
    caffe.set_device(gpuid)
    self.PNet = caffe.Net(stage_files['P'][0], stage_files['P'][1], caffe.TEST)
    self.RNet = caffe.Net(stage_files['R'][0], stage_files['R'][1], caffe.TEST)
    self.ONet = caffe.Net(stage_files['O'][0], stage_files['O'][1], caffe.TEST)
def gen_net():
    """Load the deploy FCN, push one preprocessed VOC image through it,
    and return the net after the forward pass (on GPU 1)."""
    caffe.set_device(1)
    caffe.set_mode_gpu()
    filename = '2007_000032.jpg'
    im = Image.open(filename)
    arr = np.asarray(im, dtype=np.float32)
    arr = arr[:, :, ::-1]                       # RGB -> BGR
    arr -= np.array((104.00698793, 116.66876762, 122.67891434))  # mean subtraction
    arr = arr.transpose((2, 0, 1))              # HWC -> CHW
    net = caffe.Net("deploy.prototxt", "good.caffemodel", caffe.TRAIN)
    net.blobs["data"].reshape(1, *arr.shape)
    net.blobs["data"].data[...] = arr
    net.forward()
    return net
def gen_net(num):
    """Load the train_val net at snapshot iteration `num`, run one
    preprocessed VOC image through it, and return the net."""
    caffe.set_device(0)
    caffe.set_mode_gpu()
    filename = '2007_000032.jpg'
    im = Image.open(filename)
    arr = np.asarray(im, dtype=np.float32)
    arr = arr[:, :, ::-1]                       # RGB -> BGR
    arr -= np.array((104.00698793, 116.66876762, 122.67891434))  # mean subtraction
    arr = arr.transpose((2, 0, 1))              # HWC -> CHW
    net = caffe.Net("train_val.prototxt",
                    "train_iter_" + str(num) + ".caffemodel",
                    caffe.TRAIN)
    net.blobs["data"].reshape(1, *arr.shape)
    net.blobs["data"].data[...] = arr
    net.forward()
    return net
def load_nets(args, cur_gpu): # initialize solver and feature net, # RNN should be initialized before CNN, because CNN cudnn conv layers # may assume using all available memory caffe.set_mode_gpu() caffe.set_device(cur_gpu) solver = caffe.SGDSolver(args.solver) if args.snapshot: print "Restoring history from {}".format(args.snapshot) solver.restore(args.snapshot) net = solver.net if args.weights: print "Copying weights from {}".format(args.weights) net.copy_from(args.weights) return solver, net
def load_nets(args, cur_gpu): # initialize solver and feature net, # RNN should be initialized before CNN, because CNN cudnn conv layers # may assume using all available memory caffe.set_mode_gpu() caffe.set_device(cur_gpu) solver = caffe.SGDSolver(args.solver) if args.snapshot: print "Restoring history from {}".format(args.snapshot) solver.restore(args.snapshot) rnn = solver.net if args.weights: rnn.copy_from(args.weights) feature_net = caffe.Net(args.feature_net, args.feature_param, caffe.TEST) # apply bbox regression normalization on the net weights with open(args.bbox_mean, 'rb') as f: bbox_means = cPickle.load(f) with open(args.bbox_std, 'rb') as f: bbox_stds = cPickle.load(f) feature_net.params['bbox_pred_vid'][0].data[...] = \ feature_net.params['bbox_pred_vid'][0].data * bbox_stds[:, np.newaxis] feature_net.params['bbox_pred_vid'][1].data[...] = \ feature_net.params['bbox_pred_vid'][1].data * bbox_stds + bbox_means return solver, feature_net, rnn, bbox_means, bbox_stds
def load_models(args): # load rnn model caffe.set_mode_gpu() if args.gpus is None: caffe.set_device(args.job_id - 1) else: assert args.job_id <= len(args.gpus) caffe.set_device(args.gpus[args.job_id-1]) if args.lstm_param is not '': rnn_net = caffe.Net(args.lstm_def, args.lstm_param, caffe.TEST) print 'Loaded RNN network from {:s}.'.format(args.lstm_def) else: rnn_net = caffe.Net(args.lstm_def, caffe.TEST) print 'WARNING: dummy RNN network created.' # load feature model feature_net = caffe.Net(args.def_file, args.param, caffe.TEST) print 'Loaded feature network from {:s}.'.format(args.def_file) return feature_net, rnn_net
def __init__(self,params): self.dimension = params['dimension'] self.dataset = params['dataset'] self.pooling = params['pooling'] # Read image lists with open(params['query_list'],'r') as f: self.query_names = f.read().splitlines() with open(params['frame_list'],'r') as f: self.database_list = f.read().splitlines() # Parameters needed self.layer = params['layer'] self.save_db_feats = params['database_feats'] # Init network if params['gpu']: caffe.set_mode_gpu() caffe.set_device(0) else: caffe.set_mode_cpu() print "Extracting from:", params['net_proto'] cfg.TEST.HAS_RPN = True self.net = caffe.Net(params['net_proto'], params['net'], caffe.TEST)
def solve(proto, snapshot, gpus, timing, uid, rank):
    """Multi-GPU NCCL training worker: bind this rank to its GPU, restore
    an optional snapshot, broadcast weights, and run to max_iter."""
    caffe.set_mode_gpu()
    caffe.set_device(gpus[rank])
    caffe.set_solver_count(len(gpus))
    caffe.set_solver_rank(rank)
    caffe.set_multiprocess(True)
    solver = caffe.SGDSolver(proto)
    if snapshot and len(snapshot) != 0:
        solver.restore(snapshot)
    nccl = caffe.NCCL(solver, uid)
    nccl.bcast()
    # rank 0 optionally runs the timing harness instead of plain training
    if timing and rank == 0:
        time(solver, nccl)
    else:
        solver.add_callback(nccl)
    if solver.param.layer_wise_reduce:
        solver.net.after_backward(nccl)
    solver.step(solver.param.max_iter)
def __init__(self, hyperparams, dO, dU):
    """Initialize the Caffe policy-optimization wrapper.

    Merges user hyperparams over POLICY_OPT_CAFFE defaults, picks the
    compute device, builds the solver, and wraps its test nets in a policy.
    """
    config = copy.deepcopy(POLICY_OPT_CAFFE)
    config.update(hyperparams)
    PolicyOpt.__init__(self, config, dO, dU)
    self.batch_size = self._hyperparams['batch_size']
    if self._hyperparams['use_gpu']:
        caffe.set_device(self._hyperparams['gpu_id'])
        caffe.set_mode_gpu()
    else:
        caffe.set_mode_cpu()
    self.init_solver()
    self.caffe_iter = 0
    self.var = self._hyperparams['init_var'] * np.ones(dU)
    self.policy = CaffePolicy(self.solver.test_nets[0],
                              self.solver.test_nets[1],
                              self.var)
def main(argv):
    """Run FCN inference + overlay generation for one sport's train/test
    image lists."""
    sport = 'long_jump'
    model = 'snap_iter_50000.caffemodel'
    # ---
    weights = model_root + 'fcn/' + sport + '/' + model
    netf = './fcn/' + sport + '/deploy.prototxt'
    gpu = 0
    caffe.set_device(gpu)
    caffe.set_mode_gpu()
    net = caffe.Net(netf, weights, caffe.TEST)
    # NOTE(review): first assignment is immediately overwritten (clips vs
    # patches) — only the second takes effect.
    im_head = '/export/home/mfrank/data/OlympicSports/clips/'
    im_head = '/export/home/mfrank/data/OlympicSports/patches/'
    test_path_file = 'fcn/' + sport + '/test.txt'
    train_path_file = 'fcn/' + sport + '/train.txt'
    inferfile(net, train_path_file, im_head)
    ifp_morris.apply_overlayfcn(train_path_file, factor=4)
    inferfile(net, test_path_file, im_head)
    ifp_morris.apply_overlayfcn(test_path_file, factor=4)
def get_predictions(region_crops):
    """Classify region crops as text / no-text with the configured model.

    :param region_crops: list of image arrays to classify.
    :returns: the classifier's prediction array.
    """
    # FIX: os.environ values are strings and any non-empty string (including
    # "0" or "false") is truthy, and a missing variable raised KeyError.
    # Parse the flag explicitly; unset or "0"/"false" means CPU.
    if os.environ.get("IS_GPU", "").lower() in ("1", "true", "yes"):
        caffe.set_device(0)
        caffe.set_mode_gpu()
    else:
        caffe.set_mode_cpu()
    classifier = caffe.Classifier(
        os.path.join(os.environ["TEXT_NOTEXT_MODELS_DIR"], "deploy.prototxt"),
        os.path.join(os.environ["TEXT_NOTEXT_MODELS_DIR"], "weights.caffemodel"),
        mean=np.array([104, 117, 123], dtype='f4'),
        image_dims=[224, 224],
        raw_scale=255.0,
        channel_swap=[2, 1, 0])
    LOGGER.info("Classifying " + str(len(region_crops)) + " inputs.")
    predictions = classifier.predict(region_crops)
    return predictions
def run(self, _, app_context):
    """run the action"""
    import caffe
    # init CPU/GPU mode
    cpu_mode = app_context.get_config('caffe.cpu_mode')
    if cpu_mode:
        caffe.set_mode_cpu()
    else:
        caffe.set_mode_gpu()
        caffe.set_device(0)
    # load test model
    test_model_file = "models/" + app_context.get_config('caffe.test_model')
    trained_data_file = "cache/data/" + app_context.get_config('caffe.trained_data')
    test_net = caffe.Net(test_model_file, trained_data_file, caffe.TEST)
    app_context.params['test_net'] = test_net
    logging.getLogger(__name__).info('Loaded neural network: ' + trained_data_file)
def _loadModel(self, model_dirs, id): print 'loading model...from{}'.format(model_dirs) model_file = osp.join(model_dirs, 'vgg16.prototxt') model_weights = osp.join(model_dirs, 'vgg16.caffemodel') mean_file = osp.join(model_dirs, 'vgg16_mean.npy') if id == -1: caffe.set_mode_cpu() else: caffe.set_mode_gpu() caffe.set_device(id) net = caffe.Net(model_file, model_weights, caffe.TEST) transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape}) transformer.set_mean('data', np.load(mean_file).mean(1).mean(1)) transformer.set_channel_swap('data', (2, 1, 0)) transformer.set_transpose('data', (2, 0, 1)) #transformer.set_raw_scale('data', 255) self.net = net self.transformer = transformer self.style_layers = VGG16_STYLES self.content_layers = VGG16_CONTENTS self.layers = VGG16_LAYERS print 'model loading done'
def __init__(self):
    """Prepare a GoogLeNet classifier for deep-dream style optimization:
    enable force_backward, write a patched prototxt, and set defaults."""
    caffe.set_mode_gpu()
    #caffe.set_device(0)
    model_path = '../models/bvlc_googlenet/'  # substitute your path here
    net_fn = model_path + 'deploy.prototxt'
    param_fn = model_path + 'bvlc_googlenet.caffemodel'
    model = caffe.io.caffe_pb2.NetParameter()
    text_format.Merge(open(net_fn).read(), model)
    model.force_backward = True  # backward to input layer
    open('tmp.prototxt', 'w').write(str(model))
    self.net = caffe.Classifier('tmp.prototxt', param_fn,
                                mean=np.float32([104.0, 116.0, 122.0]),
                                channel_swap=(2, 1, 0))
    # for the mode guide, if flag = 1
    self.flag = 0
    self.epoch = 20
    self.end = 'inception_4c/output'
    #self.end = 'conv4'
def __init__(self, solver_prototxt, pretrained_model=None): """Initialize the SolverWrapper.""" self.solver = caffe.SGDSolver(solver_prototxt) if pretrained_model is not None: print ('Loading pretrained model ' 'weights from {:s}').format(pretrained_model) self.solver.net.copy_from(pretrained_model) self.solver_param = caffe.io.caffe_pb2.SolverParameter() with open(solver_prototxt, 'rt') as f: text_format.Merge(f.read(), self.solver_param) if self.solver_param.solver_mode == 1: caffe.set_mode_gpu() caffe.set_device(params.gpu_id) print 'Use GPU', params.gpu_id, 'to train' else: print 'Use CPU to train' #initial python data layer self.solver.net.layers[0].set_db()
def init_detection_net(self, gpu_id=0, prototxt=None, caffemodel=None):
    """init extraction network

    Resolves default ZF Faster R-CNN model paths when none are given,
    seeds Caffe's RNG, and loads the detector onto `gpu_id`.
    """
    cfg.TEST.HAS_RPN = True  # Use RPN for proposals
    if prototxt is None:
        prototxt = os.path.join(cfg.ROOT_DIR, 'models', NETS['zf'][0],
                                'faster_rcnn_alt_opt', 'faster_rcnn_test.pt')
    if caffemodel is None:
        caffemodel = os.path.join(cfg.ROOT_DIR, 'output/default/train',
                                  NETS['zf'][1])
    if not os.path.isfile(caffemodel):
        raise IOError(('{:s} not found.\nDid you run ./data/script/'
                       'fetch_faster_rcnn_models.sh?').format(caffemodel))
    #np.random.seed(cfg.RNG_SEED)
    caffe.set_random_seed(cfg.RNG_SEED)
    caffe.set_mode_gpu()
    caffe.set_device(gpu_id)
    self.net_d = caffe.Net(prototxt, caffemodel, caffe.TEST)
def __init__(self, solver_prototxt, pretrained_model=None): """Initialize the SolverWrapper.""" self.solver = caffe.SGDSolver(solver_prototxt) if pretrained_model is not None: print ('Loading pretrained model ' 'weights from {:s}').format(pretrained_model) self.solver.net.copy_from(pretrained_model) self.solver_param = caffe.io.caffe_pb2.SolverParameter() with open(solver_prototxt, 'rt') as f: text_format.Merge(f.read(), self.solver_param) if self.solver_param.solver_mode == 1: caffe.set_mode_gpu() caffe.set_device(params.gpu_id) print 'Use GPU', params.gpu_id, 'to train' else: print 'Use CPU to train' #initial python data layer #self.solver.net.layers[0].set_db()
def solve(proto, gpus, uid, rank, max_iter):
    """NCCL training worker: rank 0 seeds the weights, all ranks step the
    solver one iteration at a time up to max_iter."""
    caffe.set_mode_gpu()
    caffe.set_device(gpus[rank])
    caffe.set_solver_count(len(gpus))
    caffe.set_solver_rank(rank)
    caffe.set_multiprocess(True)
    solver = caffe.SGDSolver(proto)
    if rank == 0:
        # solver.restore(_snapshot)
        solver.net.copy_from(_weights)
    # tell the data layer which GPU this worker owns
    solver.net.layers[0].get_gpu_id(gpus[rank])
    nccl = caffe.NCCL(solver, uid)
    nccl.bcast()
    solver.add_callback(nccl)
    if solver.param.layer_wise_reduce:
        solver.net.after_backward(nccl)
    for _ in range(max_iter):
        solver.step(1)
def test_imdb_wiki_model():
    # not finished
    """Load the IMDB-WIKI age model (DEX) on the GPU for evaluation.

    NOTE(review): marked "not finished" upstream — several locals
    (sample_lst_fn, img_root, batch_size, num_batch, imagenet_mean) are set
    but not yet used by the visible code.
    """
    sample_lst_fn = 'datasets/IMDB-WIKI/Annotations/imdb_wiki_good_test.json'
    img_root = 'datasets/IMDB-WIKI/Images'
    batch_size = 128
    num_batch = 10
    gpu_id = 0
    fn_model = 'datasets/IMDB-WIKI/caffe_models/age.prototxt'
    fn_weight = 'datasets/IMDB-WIKI/caffe_models/dex_imdb_wiki.caffemodel'
    imagenet_mean = [[[104, 117, 123]]]
    caffe.set_device(gpu_id)
    caffe.set_mode_gpu()
    model = caffemodel(fn_model, fn_weight, caffe.TEST)
def _init_caffe(cfg):
    """Initialize pycaffe in a training process.
    """
    import caffe
    # fix the random seeds (numpy and caffe) for reproducibility
    np.random.seed(cfg.RNG_SEED)
    caffe.set_random_seed(cfg.RNG_SEED)
    # set up caffe
    caffe.set_mode_gpu()
    caffe.set_device(cfg.GPU_ID)
def setup(): global resnet_mean global resnet_net global vqa_net # data provider vqa_data_provider_layer.CURRENT_DATA_SHAPE = EXTRACT_LAYER_SIZE # mean substraction blob = caffe.proto.caffe_pb2.BlobProto() data = open( RESNET_MEAN_PATH , 'rb').read() blob.ParseFromString(data) resnet_mean = np.array( caffe.io.blobproto_to_array(blob)).astype(np.float32).reshape(3,224,224) resnet_mean = np.transpose(cv2.resize(np.transpose(resnet_mean,(1,2,0)), (448,448)),(2,0,1)) # resnet caffe.set_device(GPU_ID) caffe.set_mode_gpu() resnet_net = caffe.Net(RESNET_LARGE_PROTOTXT_PATH, RESNET_CAFFEMODEL_PATH, caffe.TEST) # our net vqa_net = caffe.Net(VQA_PROTOTXT_PATH, VQA_CAFFEMODEL_PATH, caffe.TEST) # uploads if not os.path.exists(UPLOAD_FOLDER): os.makedirs(UPLOAD_FOLDER) if not os.path.exists(VIZ_FOLDER): os.makedirs(VIZ_FOLDER) print 'Finished setup'
def __init__(self, mean, weight, K, num_act, num_step=1, data_path='test'):
    """Build the action-conditional test net and load its weights.

    :param mean: mean file used when generating the net prototxt.
    :param weight: caffemodel to copy weights from.
    :param K: history length (frames).
    :param num_act: number of actions.
    :param num_step: prediction steps.
    :param data_path: dataset identifier passed to the net generator.
    """
    self.K = K
    self.num_act = num_act
    self.num_step = num_step
    caffe.set_mode_gpu()
    caffe.set_device(0)
    test_net_file, net_proto = N.create_netfile(
        1, data_path, mean, K, K, 1, num_act,
        num_step=self.num_step, mode='test')
    self.test_net = caffe.Net(test_net_file, caffe.TEST)
    self.test_net.copy_from(weight)
def ctpnSource():
    """Build and return a CTPN text detector from the bundled model files."""
    DEMO_IMAGE_DIR = "img/"  # kept from original; unused here
    NET_DEF_FILE = "CTPN/models/deploy.prototxt"
    MODEL_FILE = "CTPN/models/ctpn_trained_model.caffemodel"
    caffe.set_mode_gpu()
    caffe.set_device(cfg.TEST_GPU_ID)
    # initialize the detectors
    text_proposals_detector = TextProposalDetector(CaffeModel(NET_DEF_FILE, MODEL_FILE))
    text_detector = TextDetector(text_proposals_detector)
    return text_detector
def main():
    """CLI entry point: parse dataset/input/output/GPU args, select the
    Caffe device, and run segmentation prediction."""
    parser = argparse.ArgumentParser()
    parser.add_argument('dataset', nargs='?',
                        choices=['pascal_voc', 'camvid', 'kitti', 'cityscapes'])
    parser.add_argument('input_path', nargs='?', default='',
                        help='Required path to input image')
    parser.add_argument('-o', '--output_path', default=None)
    parser.add_argument('--gpu', type=int, default=-1,
                        help='GPU ID to run CAFFE. '
                             'If -1 (default), CPU is used')
    args = parser.parse_args()
    if args.input_path == '':
        raise IOError('Error: No path to input image')
    if not exists(args.input_path):
        raise IOError("Error: Can't find input image " + args.input_path)
    if args.gpu >= 0:
        caffe.set_mode_gpu()
        caffe.set_device(args.gpu)
        print('Using GPU ', args.gpu)
    else:
        caffe.set_mode_cpu()
        print('Using CPU')
    if args.output_path is None:
        args.output_path = '{}_{}.png'.format(
            splitext(args.input_path)[0], args.dataset)
    predict(args.dataset, args.input_path, args.output_path)
def main(argv): model_filename = '' weight_filename = '' img_filename = '' try: opts, args = getopt.getopt(argv, "hm:w:i:") print opts except getopt.GetoptError: print 'yolo_main.py -m <model_file> -w <output_file> -i <img_file>' sys.exit(2) for opt, arg in opts: if opt == '-h': print 'yolo_main.py -m <model_file> -w <weight_file> -i <img_file>' sys.exit() elif opt == "-m": model_filename = arg elif opt == "-w": weight_filename = arg elif opt == "-i": img_filename = arg print 'model file is "', model_filename print 'weight file is "', weight_filename print 'image file is "', img_filename caffe.set_device(0) caffe.set_mode_gpu() net = caffe.Net(model_filename, weight_filename, caffe.TEST) img = caffe.io.load_image(img_filename) # load the image using caffe io img_ = scipy.misc.imresize(img, (448, 448)) transformer = SimpleTransformer([104.00699, 116.66877, 122.67892]) input = transformer.preprocess(img_) out = net.forward_all(data=input) print out.iteritems() img_cv = cv2.cvtColor(img, cv2.COLOR_RGB2BGR) results = interpret_output(out['result'][0], img.shape[1], img.shape[0]) # fc27 instead of fc12 for yolo_small show_results(img_cv, results, img.shape[1], img.shape[0]) cv2.waitKey(0)
def LoadNet(self, model, weights):
    """Load a test-phase Caffe net on GPU 0 and return it."""
    caffe.set_mode_gpu()
    caffe.set_device(0)
    return caffe.Net(model, weights, caffe.TEST)
def net():
    """Delay loading the net until the last possible moment.

    Loading the net is SLOW and produces a ton
    of terminal garbage.  Also we want to wait to load it until we have
    called some other caffe initializations code (caffe.set_mode_gpu(),
    caffe.set_device(0), etc)
    """
    global __net
    if __net is None:
        __net = caffe.Net(LAYERS, WEIGHTS, caffe.TEST)
    return __net
def load_models(args): # load rnn model config = TestConfig() config.num_layers = args.lstm_num config.type = args.lstm_type config.hidden_size = config.input_size = args.lstm_input_size #tf.set_random_seed(1017) sess_config = tf.ConfigProto() # sess_config.gpu_options.allow_growth=True with tf.Graph().as_default(): session = tf.Session(config=sess_config) initializer = tf.random_uniform_initializer(-config.init_scale, config.init_scale) with tf.variable_scope("model", reuse=None, initializer=None): # with tf.device("/gpu:{}".format(args.job_id)): with tf.device("/cpu:0"): print "Constructing RNN network..." rnn_net = TPNModel(is_training=False, config=config) # restoring variables saver = tf.train.Saver() print "Starting loading session..." saver.restore(session, args.lstm_path) print 'Loaded RNN network from {:s}.'.format(args.lstm_path) # load feature model caffe.set_mode_gpu() caffe.set_device(args.job_id - 1) feature_net = caffe.Net(args.def_file, args.param, caffe.TEST) print 'Loaded feature network from {:s}.'.format(args.def_file) return feature_net, rnn_net, session
def load_nets(args, cur_gpu):
    """Load a single test-phase net on `cur_gpu` and return it."""
    # initialize solver and feature net,
    # RNN should be initialized before CNN, because CNN cudnn conv layers
    # may assume using all available memory
    caffe.set_mode_gpu()
    caffe.set_device(cur_gpu)
    return caffe.Net(args.model, args.weights, caffe.TEST)
def train_net(solver_prototxt, roidb, output_dir, nccl_uid, gpus, rank,
              queue, bbox_means, bbox_stds, pretrained_model=None,
              max_iters=40000):
    """Train a Fast R-CNN network."""
    # bind this worker to its GPU and register it with the NCCL ensemble
    caffe.set_mode_gpu()
    caffe.set_device(gpus[rank])
    caffe.set_solver_count(len(gpus))
    caffe.set_solver_rank(rank)
    caffe.set_multiprocess(True)
    caffe.set_random_seed(cfg.RNG_SEED)
    sw = SolverWrapper(solver_prototxt, roidb, output_dir, nccl_uid, rank,
                       bbox_means, bbox_stds,
                       pretrained_model=pretrained_model)
    model_paths = sw.train_model(max_iters)
    # only rank 0 reports the snapshot paths back to the parent
    if rank == 0:
        queue.put(model_paths)
def rpn_generate_single_gpu(prototxt, caffemodel, imdb, rank, gpus, output_dir):
    """Generate RPN proposals for this worker's shard of `imdb`.

    FIX: the original computed `imdb_boxes` but neither returned nor stored
    it, relying solely on imdb_proposals writing to output_dir; return the
    proposals so callers can use them directly (backward compatible —
    existing callers that ignore the return value are unaffected).
    """
    cfg.GPU_ID = gpus[rank]
    caffe.set_mode_gpu()
    caffe.set_device(cfg.GPU_ID)
    net = caffe.Net(prototxt, caffemodel, caffe.TEST)
    imdb_boxes = imdb_proposals(net, imdb, rank, len(gpus), output_dir)
    return imdb_boxes
def prep_for_deploy(self, batch_size, source_net=False, target_net=False,
                    deploy_fn='deploy.proto', caffemodel_fn='score.caffemodel',
                    gpu_id=0):
    """Generate the deploy prototxt, load it with trained weights on
    `gpu_id`, and attach semantics for both source and target domains."""
    caffe.set_mode_gpu()
    caffe.set_device(gpu_id)
    self.generate_deploy_proto(deploy_fn, batch_size,
                               source_net=source_net, target_net=target_net)
    self.deploy = caffe.Net(deploy_fn, caffe.TEST, weights=caffemodel_fn)
    self._set_semantics(self.deploy, source=False, init_cw=False)
    self._set_semantics(self.deploy, source=True, init_cw=False)
def prep_for_training(self, model_fn, solver_fn, dt_lmdbs, sem_lmdbs,
                      trainOpts, batch_size, gpu_id):
    """Generate train/solver protos, build a Nesterov solver on `gpu_id`,
    load pretrained base-CNN weights, and wire up domain semantics."""
    caffe.set_mode_gpu()
    caffe.set_device(gpu_id)
    self.generate_train_proto(model_fn, dt_lmdbs, sem_lmdbs, batch_size)
    self.generate_solver_proto(solver_fn, model_fn, trainOpts=trainOpts)
    solver = caffe.NesterovSolver(solver_fn)
    self.base_cnn.load_pretrained(solver.net)
    self._set_semantics(solver.net, source=True, init_cw=True)
    self._set_semantics(solver.test_nets[1], source=False, init_cw=True)
    self.solver = solver
def __init__(self, path_to_deploy_file, path_to_model_file,
             input_layer_name="data_q", gpu_mode=True, device_id=1,
             height=None, width=None):
    """Load a test-phase net; height/width default to the input blob's
    spatial dimensions when not given explicitly."""
    self.path_to_deploy_file = path_to_deploy_file
    self.path_to_model_file = path_to_model_file
    if gpu_mode:
        caffe.set_mode_gpu()
        caffe.set_device(device_id)
    else:
        caffe.set_mode_cpu()
    self.net = caffe.Net(path_to_deploy_file, path_to_model_file, caffe.TEST)
    self.input_layer_name = input_layer_name
    input_blob = self.net.blobs[self.input_layer_name]
    self.height = height or input_blob.data.shape[2]
    self.width = width or input_blob.data.shape[3]
def start(self, rank):
    """Start this solver worker: bind to its GPU (or CPU), materialize the
    solver graph to a temp prototxt, build the Adam solver, and join the
    NCCL ensemble when a uid is configured."""
    self.rank = rank
    if len(self.gpus) > 0:
        self.device = self.gpus[rank]
        if debug:
            s = 'solver gpu %d' % self.gpus[self.rank] + \
                ' pid %d' % os.getpid() + ' size %d' % self.size + \
                ' rank %d' % self.rank
            print(s, file=sys.stderr)
        caffe.set_mode_gpu()
        caffe.set_device(self.device)
        caffe.set_solver_count(self.size)
        caffe.set_solver_rank(self.rank)
        caffe.set_multiprocess(True)
    else:
        print('solver cpu', file=sys.stderr)
        caffe.set_mode_cpu()
    # NOTE(review): despite the .json extension check, the graph file is
    # parsed as protobuf text format — confirm the files really are textpb.
    if self.cmd.graph.endswith('.json'):
        with open(self.cmd.graph, mode='r') as f:
            graph = caffe_pb2.SolverParameter()
            text_format.Merge(f.read(), graph)
        self.graph = graph
    else:
        self.graph = self.solver_graph()
    import tempfile
    with tempfile.NamedTemporaryFile(mode='w+', delete=False) as f:
        text_format.PrintMessage(self.graph, f)
        tmp = f.name
    self.caffe = caffe.AdamSolver(tmp)
    if self.uid:
        self.nccl = caffe.NCCL(self.caffe, self.uid)
        self.nccl.bcast()
        self.caffe.add_callback(self.nccl)
        if self.caffe.param.layer_wise_reduce:
            self.caffe.net.after_backward(self.nccl)