The following code examples, extracted from open-source Python projects, illustrate how to use cv2.ORB_create().
def test_compute_matches(self):
    orb = cv2.ORB_create(10000)
    orb.setFastThreshold(0)
    matcher = cv2.BFMatcher(cv2.NORM_HAMMING)
    gms = GmsMatcher(orb, matcher)

    kp0, des0 = orb.detectAndCompute(self.img0, np.array([]))
    kp1, des1 = orb.detectAndCompute(self.img1, np.array([]))
    matches = matcher.match(des0, des1)
    matches = gms.compute_matches(kp0, kp1, des0, des1, matches, self.img0)
    self.assertTrue(len(matches) > 0)

# def test_compute_matches2(self):
#     orb = cv2.ORB_create(1000)
#     orb.setFastThreshold(0)
#     matcher = cv2.BFMatcher(cv2.NORM_HAMMING)
#     gms = GmsMatcher(orb, matcher)
#
#     camera = Camera()
#     img0 = camera.update()
#
#     while True:
#         img1 = camera.update()
#         matches = gms.compute_matches(img0, img1)
#         gms.draw_matches(img0, img1)
#
#         img0 = img1
#
#         # matches_img = draw_matches(img0, img1, kp0, kp1, matches)
#         # cv2.imshow("Matches", matches_img)
#         # if cv2.waitKey(1) == 113:
#         #     exit(0)
#
#     self.assertTrue(len(matches) > 0)
def __init__(self, **kwargs):
    """ Constructor """
    self.orb = cv2.ORB_create(
        nfeatures=kwargs.get("nfeatures", 500),
        scaleFactor=kwargs.get("scaleFactor", 1.2),
        nlevels=kwargs.get("nlevels", 8),
        edgeThreshold=kwargs.get("edgeThreshold", 31),
        firstLevel=kwargs.get("firstLevel", 0),
        WTA_K=kwargs.get("WTA_K", 2),
        scoreType=kwargs.get("scoreType", cv2.ORB_HARRIS_SCORE),
        patchSize=kwargs.get("patchSize", 31),
        fastThreshold=kwargs.get("fastThreshold", 20)
    )
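A hypothetical usage of this wrapper, assuming the constructor belongs to a class named OrbDetector (the class name is not shown in the excerpt) and that gray is a uint8 grayscale image:

# OrbDetector is a hypothetical name for the class owning the __init__
# above; any keyword argument not passed falls back to its default.
detector = OrbDetector(nfeatures=1000, fastThreshold=10)
keypoints, descriptors = detector.orb.detectAndCompute(gray, None)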
def __init__(self, image, maxImageSize=1000, minInlierRatio=0.15, minInliers=25, fast=False):
    '''
    maxImageSize -> limit the image size to speed up processing; set to False to deactivate
    minInlierRatio -> e.g. 0.2 -> at least 20% of the matched keypoints must be inliers, else an error is raised
    '''
    self.signal_ranges = []
    self.maxImageSize = maxImageSize
    self.minInlierRatio = minInlierRatio
    self.minInliers = minInliers
    self._fH = None  # homography factor, if image was resized
    self.base8bit = self._prepareImage(image)

    # Parameters for nearest-neighbour matching
    # flann_params = dict(algorithm=1, trees=2)
    # self.matcher = cv2.FlannBasedMatcher(flann_params, {})

    # PATTERN DETECTOR:
    # self.detector = cv2.BRISK_create()
    if fast:
        self.detector = cv2.ORB_create()
    else:
        self.detector = cv2.ORB_create(
            nfeatures=70000,
            # scoreType=cv2.ORB_FAST_SCORE
        )
    # removed because of license issues:
    # cv2.xfeatures2d.SIFT_create()
    f, d = self.detector.detectAndCompute(self.base8bit, None)
    self.base_features, self.base_descs = f, d  # .astype(np.float32)
def describeORB(image):
    # An efficient alternative to SIFT or SURF
    # doc: http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_feature2d/py_orb/py_orb.html
    # ORB is basically a fusion of the FAST keypoint detector and the BRIEF
    # descriptor, with many modifications to enhance the performance.
    orb = cv2.ORB_create()
    kp, des = orb.detectAndCompute(image, None)
    return kp, des
def orb(gray):
    orb = cv2.ORB_create()
    kp, des = orb.detectAndCompute(gray, None)
    return kp, des
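The two helpers above return raw keypoints and descriptors but stop short of matching. ORB descriptors are binary, so brute-force matching with the Hamming norm is the standard next step. A minimal, self-contained sketch (the name match_orb is ours, not from any of the projects quoted here):

import cv2

def match_orb(gray0, gray1, max_matches=50):
    # Detect keypoints and compute binary descriptors in both images.
    orb = cv2.ORB_create()
    kp0, des0 = orb.detectAndCompute(gray0, None)
    kp1, des1 = orb.detectAndCompute(gray1, None)
    # ORB descriptors are binary, so use the Hamming norm;
    # crossCheck=True keeps only mutually-best matches.
    matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    matches = sorted(matcher.match(des0, des1), key=lambda m: m.distance)
    # Visualize the strongest matches side by side.
    return cv2.drawMatches(gray0, kp0, gray1, kp1, matches[:max_matches], None)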
def orb_features(image, keypoints):
    if isinstance(keypoints, np.ndarray):
        # takes in x, y coordinates; size is the diameter of the described area
        keypoints = [cv2.KeyPoint(p[0], p[1], ORB_DESCRIPTOR_SIZE) for p in keypoints]
    orb = cv2.ORB_create()
    new_keypoints, descriptors = orb.compute(np.mean(image, axis=2).astype(np.uint8), keypoints)
    print('len(keypoints)', len(keypoints))
    print('len(new_keypoints)', len(new_keypoints))
    return new_keypoints, descriptors
def createTrainingInstances(self, images):
    instances = []
    img_descriptors = []
    master_descriptors = []
    cv2.ocl.setUseOpenCL(False)
    orb = cv2.ORB_create()
    for img, label in images:
        print(img)
        img = read_color_image(img)
        keypoints = orb.detect(img, None)
        keypoints, descriptors = orb.compute(img, keypoints)
        if descriptors is None:
            descriptors = []
        img_descriptors.append(descriptors)
        for i in descriptors:
            master_descriptors.append(i)

    master_descriptors = np.float32(master_descriptors)
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
    ret, labels, centers = cv2.kmeans(master_descriptors, self.center_num, None,
                                      criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
    labels = labels.ravel()

    count = 0
    img_num = 0
    for img, label in images:
        histogram = np.zeros(self.center_num)
        feature_vector = img_descriptors[img_num]
        for f in range(len(feature_vector)):
            index = count + f
            histogram.itemset(labels[index], 1 + histogram.item(labels[index]))
        count += len(feature_vector)
        img_num += 1  # advance to the next image's descriptors
        pairing = Instance(histogram, label)
        instances.append(pairing)
    self.training_instances = instances
    self.centers = centers
def createTestingInstances(self, images):
    cv2.ocl.setUseOpenCL(False)
    orb = cv2.ORB_create()
    instances = []
    for img, label in images:
        print(img)
        img = read_color_image(img)
        keypoints = orb.detect(img, None)
        keypoints, descriptors = orb.compute(img, keypoints)
        if descriptors is None:
            descriptors = []
        histogram = np.zeros(self.center_num)
        for d in descriptors:
            min_val = None
            min_index = None
            # assign each descriptor to its nearest cluster center
            for j in range(len(self.centers)):
                distance = np.linalg.norm(d - self.centers[j])
                if min_val is None or distance < min_val:
                    min_val = distance
                    min_index = j
            histogram.itemset(min_index, 1 + histogram.item(min_index))
        instances.append(Instance(histogram, label))
    self.testing_instances = instances
def local_bow_train(self, images):
    # Bag-of-words training pass, mirroring createTrainingInstances above.
    instances = []
    img_descriptors = []
    master_descriptors = []
    cv2.ocl.setUseOpenCL(False)
    orb = cv2.ORB_create()
    for img, label in images:
        print(img)
        img = read_color_image(img)
        keypoints = orb.detect(img, None)
        keypoints, descriptors = orb.compute(img, keypoints)
        if descriptors is None:
            descriptors = []
        img_descriptors.append(descriptors)
        for i in descriptors:
            master_descriptors.append(i)

    master_descriptors = np.float32(master_descriptors)
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
    ret, labels, centers = cv2.kmeans(master_descriptors, self.center_num, None,
                                      criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
    labels = labels.ravel()

    count = 0
    img_num = 0
    for img, label in images:
        histogram = np.zeros(self.center_num)
        feature_vector = img_descriptors[img_num]
        for f in range(len(feature_vector)):
            index = count + f
            histogram.itemset(labels[index], 1 + histogram.item(labels[index]))
        count += len(feature_vector)
        img_num += 1  # advance to the next image's descriptors
        pairing = Instance(histogram, label)
        instances.append(pairing)
    self.training_instances = instances
    self.centers = centers
def orb_thread():
    orb = cv2.ORB_create()
    kps4 = orb.detect(gray, None)
    (kps4, des4) = orb.compute(gray, kps4)
    cv2.drawKeypoints(gray, kps4, img4, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    cv2.imshow('ORB Algorithm', img4)
def main():
    img = None
    main_win = Windows_handler.WinHandler(title='Nox', class_name='Qt5QWindowIcon')
    main_box = main_win.get_bbox()
    px_handler = Pixel_handler.PixelSearch(win_handler=main_win)
    mouse = Mouse_handler.MouseMovement(window_handler=main_win)
    main_win.init_window()

    cv2.namedWindow('image_name')
    cv2.namedWindow('config')

    while True:
        img = px_handler.grab_window(bbox=main_box)
        img = px_handler.img_to_numpy(img, compound=False)
        img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)

        orb = cv2.ORB_create()
        kp = orb.detect(img, None)
        kp, des = orb.compute(img, kp)
        img2 = cv2.drawKeypoints(img, kp, None)  # outImage is required; None lets OpenCV allocate it

        cv2.imshow('image_name', img2)
        cv2.setMouseCallback('image_name', mouse_event, param=img)

        k = cv2.waitKey(1)
        if k == ord('q'):  # press 'q' to exit
            cv2.destroyAllWindows()
            quit(0)
def process_loop(self):
    cap_sd = cv2.VideoCapture('pipe:%d' % self.pipe_r_sd)
    fps = cap_sd.get(cv2.CAP_PROP_FPS)
    fps = 24  # hard-coded override of the FPS reported by the pipe
    self.ws.log('pr: opened video')

    det = cut_detector.ContentDetector()
    orb = cv2.ORB_create()

    i = 0
    scene = 0
    while cap_sd.isOpened():
        if self.do_stop:
            break
        ret, frame = cap_sd.read()
        # self.ws.log('pr: read frame', i)

        is_cut = det.process_frame(i, frame)

        kp = orb.detect(frame, None)
        kp, des = orb.compute(frame, kp)
        # img2 = cv2.drawKeypoints(frame, kp, None, color=(0,255,0), flags=0)
        # cv2.imshow('', img2)
        # cv2.waitKey(0)
        # 1/0

        if is_cut:
            self.ws.log('pr: cut at', i)
            preview = 'previews/frame%04d_%d.png' % (scene, i)
            cv2.imwrite(preview, frame)
            self.ws.sendJSON({
                'scene': scene,
                'time': frame2time(i, fps),
                'preview': preview
            })
            scene += 1

        # call to descriptor callback
        self.desc_cb(i, des, is_cut)

        self.processed = i
        i += 1
    cap_sd.release()
def __init__(self, action_space, feature_type=None, filter_features=None,
             max_time_steps=100, distance_threshold=4.0, **kwargs):
    """
    filter_features indicates whether to filter out key points that are not
    on the object in the current image. Key points in the target image are
    always filtered out.
    """
    SimpleQuadPanda3dEnv.__init__(self, action_space, **kwargs)
    ServoingEnv.__init__(self, env=self, max_time_steps=max_time_steps,
                         distance_threshold=distance_threshold)

    lens = self.camera_node.node().getLens()
    self._observation_space.spaces['points'] = BoxSpace(
        np.array([-np.inf, lens.getNear(), -np.inf]),
        np.array([np.inf, lens.getFar(), np.inf]))
    film_size = tuple(int(s) for s in lens.getFilmSize())
    self.mask_camera_sensor = Panda3dMaskCameraSensor(
        self.app, (self.skybox_node, self.city_node),
        size=film_size,
        near_far=(lens.getNear(), lens.getFar()),
        hfov=lens.getFov())
    for cam in self.mask_camera_sensor.cam:
        cam.reparentTo(self.camera_sensor.cam)

    self.filter_features = True if filter_features is None else filter_features
    self._feature_type = feature_type or 'sift'
    if cv2.__version__.split('.')[0] == '3':
        from cv2.xfeatures2d import SIFT_create, SURF_create
        from cv2 import ORB_create
        if self.feature_type == 'orb':
            # https://github.com/opencv/opencv/issues/6081
            cv2.ocl.setUseOpenCL(False)
    else:
        SIFT_create = cv2.SIFT
        SURF_create = cv2.SURF
        ORB_create = cv2.ORB
    if self.feature_type == 'sift':
        self._feature_extractor = SIFT_create()
    elif self.feature_type == 'surf':
        self._feature_extractor = SURF_create()
    elif self.feature_type == 'orb':
        self._feature_extractor = ORB_create()
    else:
        raise ValueError("Unknown feature extractor %s" % self.feature_type)
    if self.feature_type == 'orb':
        self._matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    else:
        self._matcher = cv2.BFMatcher(cv2.NORM_L2, crossCheck=True)
    self._target_key_points = None
    self._target_descriptors = None
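As an aside, the version branch above predates OpenCV 4: since release 4.4, SIFT has moved back into the main cv2 module. A sketch of the extra branch such code would need on newer builds, as an assumption rather than part of the original environment:

import cv2

if hasattr(cv2, 'SIFT_create'):
    # OpenCV 4.4+ exposes SIFT in the main module (the patent expired);
    # SURF still requires a nonfree xfeatures2d build.
    SIFT_create = cv2.SIFT_create
    ORB_create = cv2.ORB_create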
def get_orb_keypoints(bd, image_min, image_max):
    """
    Computes the ORB key points

    Args:
        bd (2d array)
        image_min (int or float)
        image_max (int or float)
    """
    # We want odd patch sizes.
    # if parameter_object.scales[-1] % 2 == 0:
    #     patch_size = parameter_object.scales[-1] - 1

    if bd.dtype != 'uint8':
        bd = np.uint8(rescale_intensity(bd,
                                        in_range=(image_min, image_max),
                                        out_range=(0, 255)))

    patch_size = 31
    patch_size_d = patch_size * 3

    # Initiate ORB detector
    orb = cv2.ORB_create(nfeatures=int(.25 * (bd.shape[0] * bd.shape[1])),
                         edgeThreshold=patch_size,
                         scaleFactor=1.2,
                         nlevels=8,
                         patchSize=patch_size,
                         WTA_K=4,
                         scoreType=cv2.ORB_FAST_SCORE)

    # Add padding because ORB ignores edges.
    bd = cv2.copyMakeBorder(bd, patch_size_d, patch_size_d, patch_size_d,
                            patch_size_d, cv2.BORDER_REFLECT)

    # Compute ORB keypoints
    key_points = orb.detectAndCompute(bd, None)[0]

    # img = cv2.drawKeypoints(np.uint8(ch_bd), key_points, np.uint8(ch_bd).copy())

    return fill_key_points(np.float32(bd), key_points)[patch_size_d:-patch_size_d,
                                                       patch_size_d:-patch_size_d]
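A side note on the WTA_K=4 setting above: with WTA_K of 3 or 4, each descriptor element encodes a choice among more than two sampled points, so if such descriptors were ever matched, OpenCV's documentation calls for NORM_HAMMING2 rather than NORM_HAMMING. A minimal sketch (the function name match_wta4 and the two uint8 inputs are our assumptions):

import cv2

def match_wta4(a, b):
    # WTA_K=4 packs 2-bit point indices into each descriptor byte,
    # so distances must be computed with the 2-bit Hamming norm.
    orb = cv2.ORB_create(WTA_K=4)
    kp_a, des_a = orb.detectAndCompute(a, None)
    kp_b, des_b = orb.detectAndCompute(b, None)
    matcher = cv2.BFMatcher(cv2.NORM_HAMMING2, crossCheck=True)
    return matcher.match(des_a, des_b)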
def stackImagesKeypointMatching(file_list):

    orb = cv2.ORB_create()

    # disable OpenCL because of a bug in ORB in OpenCV 3.1
    cv2.ocl.setUseOpenCL(False)

    stacked_image = None
    first_image = None
    first_kp = None
    first_des = None
    for file in file_list:
        print(file)
        image = cv2.imread(file, 1)
        imageF = image.astype(np.float32) / 255

        # compute the descriptors with ORB
        kp = orb.detect(image, None)
        kp, des = orb.compute(image, kp)

        # create BFMatcher object
        matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

        if first_image is None:
            # Save keypoints for first image
            stacked_image = imageF
            first_image = image
            first_kp = kp
            first_des = des
        else:
            # Find matches and sort them in the order of their distance
            matches = matcher.match(first_des, des)
            matches = sorted(matches, key=lambda x: x.distance)

            src_pts = np.float32(
                [first_kp[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)
            dst_pts = np.float32(
                [kp[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)

            # Estimate perspective transformation
            M, mask = cv2.findHomography(dst_pts, src_pts, cv2.RANSAC, 5.0)
            h, w, _ = imageF.shape  # shape is (rows, cols, channels)
            imageF = cv2.warpPerspective(imageF, M, (w, h))
            stacked_image += imageF

    stacked_image /= len(file_list)
    stacked_image = (stacked_image * 255).astype(np.uint8)
    return stacked_image

# ===== MAIN =====
# Read all files in directory
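The listing breaks off at its main section. A minimal sketch of what such a driver could look like, assuming cv2 is imported and the function above is in scope (the directory argument and output filename are our assumptions):

import os
import sys

if __name__ == '__main__':
    # Hypothetical driver: stack every image found in the directory
    # given on the command line and write the result next to them.
    image_folder = sys.argv[1] if len(sys.argv) > 1 else '.'
    file_list = [os.path.join(image_folder, f)
                 for f in sorted(os.listdir(image_folder))
                 if f.lower().endswith(('.png', '.jpg', '.jpeg'))]
    result = stackImagesKeypointMatching(file_list)
    cv2.imwrite(os.path.join(image_folder, 'stacked.png'), result)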