The following 47 code examples, extracted from open-source Python projects, illustrate how to use fast_rcnn.config.cfg.ROOT_DIR.
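All of the examples share one pattern: cfg.ROOT_DIR holds the root directory of a py-faster-rcnn-style checkout, and repository-relative paths (models, demo data, outputs) are built from it with os.path.join. The sketch below shows only that pattern in isolation; the concrete file names in it are illustrative assumptions, not taken from any of the projects.

import os
from fast_rcnn.config import cfg

# cfg.ROOT_DIR points at the repository root; build paths relative to it.
# The file names below are hypothetical, chosen only to illustrate the pattern.
prototxt = os.path.join(cfg.ROOT_DIR, 'models', 'VGG16', 'test.prototxt')
demo_image = os.path.join(cfg.ROOT_DIR, 'data', 'demo', '000456.jpg')

print('ROOT_DIR   = {}'.format(cfg.ROOT_DIR))
print('prototxt   = {}'.format(prototxt))
print('demo image = {}'.format(demo_image))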
def get_solvers(net_name):
    # Faster R-CNN Alternating Optimization
    n = 'faster_rcnn_alt_opt'
    # Solver for each training stage
    solvers = [[net_name, n, 'stage1_rpn_solver60k80k.pt'],
               [net_name, n, 'stage1_fast_rcnn_solver30k40k.pt'],
               [net_name, n, 'stage2_rpn_solver60k80k.pt'],
               [net_name, n, 'stage2_fast_rcnn_solver30k40k.pt']]
    solvers = [os.path.join(cfg.ROOT_DIR, 'models', *s) for s in solvers]
    # Iterations for each training stage
    max_iters = [80000, 40000, 80000, 40000]
    # max_iters = [100, 100, 100, 100]
    # Test prototxt for the RPN
    rpn_test_prototxt = os.path.join(
        cfg.ROOT_DIR, 'models', net_name, n, 'rpn_test.pt')
    return solvers, max_iters, rpn_test_prototxt

# ------------------------------------------------------------------------------
# Pycaffe doesn't reliably free GPU memory when instantiated nets are discarded
# (e.g. "del net" in Python code). To work around this issue, each training
# stage is executed in a separate process using multiprocessing.Process.
# ------------------------------------------------------------------------------
def visualization_plus(net, layer_name, save_dir):
    save_path = os.path.join(cfg.ROOT_DIR, 'visualization_plus', save_dir)
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    feat = net.blobs[layer_name].data[0]
    fm = feat[0]
    print type(fm)
    print fm.shape
    for f in feat:
        fm += f
    fm = fm - feat[0]
    print 'fm max = {}, min = {}'.format(fm.max(), fm.min())
    #fm -= fm.min()
    #fm /= fm.max()
    fm *= 255
    i = 0
    cv2.imwrite(os.path.join(save_path, '{:s}.png'.format(layer_name)), fm)
def get_solvers(net_name):
    # Faster R-CNN Alternating Optimization
    n = 'faster_rcnn_alt_opt'
    # Solver for each training stage
    solvers = [[net_name, n, 'stage1_rpn_solver60k80k.pt'],
               [net_name, n, 'stage1_fast_rcnn_solver30k40k.pt'],
               [net_name, n, 'stage2_rpn_solver60k80k.pt'],
               [net_name, n, 'stage2_fast_rcnn_solver30k40k.pt']]
    solvers = [os.path.join(cfg.ROOT_DIR, 'models', *s) for s in solvers]
    # Iterations for each training stage
    # max_iters = [80000, 40000, 80000, 40000]
    max_iters = [40, 40, 40, 40]
    # Test prototxt for the RPN
    rpn_test_prototxt = os.path.join(
        cfg.ROOT_DIR, 'models', net_name, n, 'rpn_test.pt')
    return solvers, max_iters, rpn_test_prototxt

# ------------------------------------------------------------------------------
# Pycaffe doesn't reliably free GPU memory when instantiated nets are discarded
# (e.g. "del net" in Python code). To work around this issue, each training
# stage is executed in a separate process using multiprocessing.Process.
# ------------------------------------------------------------------------------
def get_solvers(net_name):
    # Faster R-CNN Alternating Optimization
    n = 'faster_rcnn_alt_opt'
    # Solver for each training stage
    solvers = [[net_name, n, 'stage1_rpn_solver60k80k.pt'],
               [net_name, n, 'stage1_fast_rcnn_ohem_solver30k40k.pt'],
               [net_name, n, 'stage2_rpn_solver60k80k.pt'],
               [net_name, n, 'stage2_fast_rcnn_ohem_solver30k40k.pt']]
    solvers = [os.path.join(cfg.ROOT_DIR, 'models', *s) for s in solvers]
    # Iterations for each training stage
    max_iters = [80000, 40000, 80000, 10000]
    # max_iters = [50, 50, 50, 50]
    # Test prototxt for the RPN
    rpn_test_prototxt = os.path.join(
        cfg.ROOT_DIR, 'models', net_name, n, 'rpn_test.pt')
    return solvers, max_iters, rpn_test_prototxt

# ------------------------------------------------------------------------------
# Pycaffe doesn't reliably free GPU memory when instantiated nets are discarded
# (e.g. "del net" in Python code). To work around this issue, each training
# stage is executed in a separate process using multiprocessing.Process.
# ------------------------------------------------------------------------------
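The banner comment that closes each get_solvers variant explains why the solver paths are returned rather than used directly: each training stage is run in its own process so that the GPU memory held by pycaffe is released when the process exits. The sketch below illustrates that pattern only; train_stage is a hypothetical stand-in for the real per-stage training function, not code taken from the examples above.

import multiprocessing as mp

def train_stage(solver_path, max_iters, queue):
    # Hypothetical per-stage worker: train with `solver_path` for `max_iters`
    # iterations and report the resulting snapshot path back through `queue`.
    queue.put('path/to/snapshot.caffemodel')

def run_stages(solvers, max_iters):
    # Run each stage in a separate process so GPU memory held by pycaffe
    # is freed when the process exits (see the banner comment above).
    for solver_path, iters in zip(solvers, max_iters):
        queue = mp.Queue()
        p = mp.Process(target=train_stage, args=(solver_path, iters, queue))
        p.start()
        snapshot = queue.get()
        p.join()
        print('Stage finished, snapshot: {}'.format(snapshot))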
def init_detection_net(self, gpu_id=0, prototxt=None, caffemodel=None):
    """init extraction network"""
    cfg.TEST.HAS_RPN = True  # Use RPN for proposals
    if prototxt is None:
        prototxt = os.path.join(cfg.ROOT_DIR, 'models', NETS['zf'][0],
                                'faster_rcnn_alt_opt', 'faster_rcnn_test.pt')
    if caffemodel is None:
        caffemodel = os.path.join(cfg.ROOT_DIR, 'output/default/train',
                                  NETS['zf'][1])
    if not os.path.isfile(caffemodel):
        raise IOError(('{:s} not found.\nDid you run ./data/script/'
                       'fetch_faster_rcnn_models.sh?').format(caffemodel))
    # np.random.seed(cfg.RNG_SEED)
    caffe.set_random_seed(cfg.RNG_SEED)
    caffe.set_mode_gpu()
    caffe.set_device(gpu_id)
    self.net_d = caffe.Net(prototxt, caffemodel, caffe.TEST)
def _do_matlab_eval(self, output_dir='output'):
    print '-----------------------------------------------------'
    print 'Computing results with the official MATLAB eval code.'
    print '-----------------------------------------------------'
    path = os.path.join(cfg.ROOT_DIR, 'lib', 'datasets',
                        'VOCdevkit-matlab-wrapper')
    cmd = 'cd {} && '.format(path)
    cmd += '{:s} -nodisplay -nodesktop '.format(cfg.MATLAB)
    cmd += '-r "dbstop if error; '
    cmd += 'voc_eval(\'{:s}\',\'{:s}\',\'{:s}\',\'{:s}\'); quit;"' \
           .format(self._devkit_path, self._get_comp_id(),
                   self._image_set, output_dir)
    print('Running:\n{}'.format(cmd))
    status = subprocess.call(cmd, shell=True)
def demo(net, image_name, classes):
    """Detect object classes in an image using pre-computed object proposals."""

    # Load pre-computed Selective Search object proposals
    box_file = os.path.join(cfg.ROOT_DIR, 'data', 'demo',
                            image_name + '_boxes.mat')
    obj_proposals = sio.loadmat(box_file)['boxes']

    # Load the demo image
    im_file = os.path.join(cfg.ROOT_DIR, 'data', 'demo', image_name + '.jpg')
    im = cv2.imread(im_file)

    # Detect all object classes and regress object bounds
    timer = Timer()
    timer.tic()
    scores, boxes = im_detect(net, im, obj_proposals)
    timer.toc()
    print ('Detection took {:.3f}s for '
           '{:d} object proposals').format(timer.total_time, boxes.shape[0])

    # Visualize detections for each class
    CONF_THRESH = 0.8
    NMS_THRESH = 0.3
    for cls in classes:
        cls_ind = CLASSES.index(cls)
        cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
        cls_scores = scores[:, cls_ind]
        dets = np.hstack((cls_boxes,
                          cls_scores[:, np.newaxis])).astype(np.float32)
        keep = nms(dets, NMS_THRESH)
        dets = dets[keep, :]
        print 'All {} detections with p({} | box) >= {:.1f}'.format(cls, cls,
                                                                    CONF_THRESH)
        vis_detections(im, cls, dets, thresh=CONF_THRESH)
def demo(net, image_name):
    """Detect object classes in an image using pre-computed object proposals."""

    # Load the demo image
    im_file = os.path.join(cfg.ROOT_DIR, 'data', 'demo', image_name)
    im = cv2.imread(im_file)

    # Detect all object classes and regress object bounds
    timer = Timer()
    timer.tic()
    scores, boxes = im_detect(net, im)
    timer.toc()
    print ('Detection took {:.3f}s for '
           '{:d} object proposals').format(timer.total_time, boxes.shape[0])

    # Visualize detections for each class
    CONF_THRESH = 0.8
    NMS_THRESH = 0.3
    for cls_ind, cls in enumerate(CLASSES[1:]):
        cls_ind += 1  # because we skipped background
        cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
        cls_scores = scores[:, cls_ind]
        dets = np.hstack((cls_boxes,
                          cls_scores[:, np.newaxis])).astype(np.float32)
        keep = nms(dets, NMS_THRESH)
        dets = dets[keep, :]
        vis_detections(im, cls, dets, thresh=CONF_THRESH)
def _get_default_path(self):
    """
    Return the default path where PASCAL VOC is expected to be installed.
    """
    return os.path.join(datasets.ROOT_DIR, 'data', 'VOCdevkit' + self._year)
def visualization(net, layer_name, save_dir):
    save_path = os.path.join(cfg.ROOT_DIR, 'visualization', save_dir, layer_name)
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    feat = net.blobs[layer_name].data[0]
    print feat.shape
    feat -= feat.min()
    feat /= feat.max()
    feat *= 255
    i = 0
    for im in feat:
        #iFColor = FColor(im)
        cv2.imwrite(os.path.join(save_path, '{:d}.png'.format(i)), im)
        i = i + 1
    #vis_square(feat, padval=1)
def demo(net, dir_name, image_name):
    """Detect object classes in an image using pre-computed object proposals."""

    # Load the demo image
    im_file = os.path.join(cfg.ROOT_DIR, 'data', 'demo', 'test', dir_name,
                           image_name)
    im = cv2.imread(im_file)

    # Detect all object classes and regress object bounds
    timer = Timer()
    timer.tic()
    scores, boxes = im_detect(net, im)
    timer.toc()
    print ('Detection took {:.3f}s for '
           '{:d} object proposals').format(timer.total_time, boxes.shape[0])

    # Visualize detections for each class
    CONF_THRESH = 0.8
    NMS_THRESH = 0.3
    for cls_ind, cls in enumerate(CLASSES[1:]):
        cls_ind += 1  # because we skipped background
        cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
        cls_scores = scores[:, cls_ind]
        dets = np.hstack((cls_boxes,
                          cls_scores[:, np.newaxis])).astype(np.float32)
        keep = nms(dets, NMS_THRESH)
        dets = dets[keep, :]
        vis_detections(im, cls, dets, dir_name, image_name, thresh=CONF_THRESH)
def _get_default_path(self):
    """
    Return the default path where KITTI is expected to be installed.
    """
    return os.path.join(datasets.ROOT_DIR, 'data', 'building_data')

# ---------------------------------------------------------------------------------------------------------------- #
def build_net():
    prototxt = os.path.join(cfg.ROOT_DIR, 'models', NETS[args.demo_net][0],
                            'faster_rcnn_alt_opt', 'faster_rcnn_test.pt')
    caffemodel = os.path.join(cfg.ROOT_DIR, 'data', 'faster_rcnn_models',
                              NETS[args.demo_net][1])

    caffe.set_mode_gpu()
    caffe.set_device(args.gpu_id)
    cfg.GPU_ID = args.gpu_id
    net = caffe.Net(prototxt, caffemodel, caffe.TEST)
    return net
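A builder like build_net is typically paired with one of the demo functions shown earlier: the returned caffe.Net is handed to im_detect through demo. The snippet below is a hypothetical usage sketch; it assumes the script has already parsed its command-line arguments into args (as build_net itself does), and the image name is illustrative.

if __name__ == '__main__':
    net = build_net()
    # '000456.jpg' is an illustrative name for an image under data/demo/.
    demo(net, '000456.jpg')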
def demo(net, image_name):
    """Detect object classes in an image using pre-computed object proposals."""

    # Load the demo image
    # im_file = os.path.join(cfg.ROOT_DIR, 'data', 'demo', image_name)
    im = cv2.imread(image_name)
    assert im is not None

    # Detect all object classes and regress object bounds
    timer = Timer()
    timer.tic()
    scores, boxes = im_detect(net, im)
    timer.toc()
    print ('Detection took {:.3f}s for '
           '{:d} object proposals').format(timer.total_time, boxes.shape[0])

    # Visualize detections for each class
    CONF_THRESH = 0.8
    NMS_THRESH = 0.3
    for cls_ind, cls in enumerate(CLASSES[1:]):
        cls_ind += 1  # because we skipped background
        cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
        cls_scores = scores[:, cls_ind]
        dets = np.hstack((cls_boxes,
                          cls_scores[:, np.newaxis])).astype(np.float32)
        if NMS_THRESH_ENABLE:
            keep = nms(dets, NMS_THRESH)
            dets = dets[keep, :]
        return dets
        # vis_detections(im, cls, dets, thresh=CONF_THRESH)
def demo(net, image_name, classes):
    """Detect object classes in an image using pre-computed object proposals."""

    # Load pre-computed Selective Search object proposals
    box_file = os.path.join(cfg.ROOT_DIR, 'data', 'demo',
                            image_name + '_boxes.mat')
    obj_proposals = sio.loadmat(box_file)['boxes']

    # Load the demo image
    im_file = os.path.join(cfg.ROOT_DIR, 'data', 'demo', image_name + '.jpg')
    im = cv2.imread(im_file)

    # Detect all object classes and regress object bounds
    timer = Timer()
    timer.tic()
    scores, boxes = im_detect(net, im, obj_proposals, len(classes))
    timer.toc()
    print ('Detection took {:.3f}s for '
           '{:d} object proposals').format(timer.total_time, boxes.shape[0])

    # Visualize detections for each class
    CONF_THRESH = 0.8
    NMS_THRESH = 0.3
    for cls in classes:
        cls_ind = CLASSES.index(cls)
        cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
        cls_scores = scores[:, cls_ind]
        dets = np.hstack((cls_boxes,
                          cls_scores[:, np.newaxis])).astype(np.float32)
        keep = nms(dets, NMS_THRESH)
        dets = dets[keep, :]
        print 'All {} detections with p({} | box) >= {:.1f}'.format(cls, cls,
                                                                    CONF_THRESH)
        vis_detections(im, cls, dets, thresh=CONF_THRESH)