The following code examples, collected from open-source Python projects, illustrate how to use cv2.TM_SQDIFF.
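As a quick orientation before the project snippets: TM_SQDIFF is a squared-difference cost, so smaller values mean better matches, and the best location comes from the min side of cv2.minMaxLoc. A minimal sketch (the file names are placeholders, not taken from any of the projects below):

import cv2

# Placeholder file names; any grayscale image/template pair works.
image = cv2.imread('scene.png', cv2.IMREAD_GRAYSCALE)
template = cv2.imread('patch.png', cv2.IMREAD_GRAYSCALE)

# TM_SQDIFF produces a map of squared differences; lower is better.
res = cv2.matchTemplate(image, template, cv2.TM_SQDIFF)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)

# So the best match comes from the *min* side of minMaxLoc.
h, w = template.shape
top_left = min_loc
bottom_right = (top_left[0] + w, top_left[1] + h)
cv2.rectangle(image, top_left, bottom_right, 255, 2)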
def locate_img(image, template, method=cv2.TM_SQDIFF):
    img = image.copy()
    res = cv2.matchTemplate(img, template, method)
    print(res)
    print(res.shape)
    cv2.imwrite('image/shape.png', res)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    print(cv2.minMaxLoc(res))
    # SQDIFF methods score the best match with the minimum value
    if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
        top_left = min_loc
    else:
        top_left = max_loc
    h, w = template.shape
    bottom_right = (top_left[0] + w, top_left[1] + h)
    cv2.rectangle(img, top_left, bottom_right, 255, 2)
    cv2.imwrite('image/tt.jpg', img)
def test_crop_random():
    # Given one sample image and the following parameters
    image = helpers.get_one_sample_image()
    parameters = {"dst_size": (20, 20),
                  "n_patches": 5}

    # When we perform crop_random()
    patches = utils.crop_random(image, parameters["dst_size"], parameters["n_patches"])

    # Then every patch should be contained in the image: a patch cropped
    # from the image matches itself exactly, so its minimum TM_SQDIFF cost is 0.
    match_cost = []
    for patch in patches:
        M = cv2.matchTemplate(image, patch, cv2.TM_SQDIFF)
        min_cost, _, _, _ = cv2.minMaxLoc(M)
        match_cost.append(min_cost)
    assert (np.array(match_cost) == 0).all(), "utils.crop_random() unit test failed!!"
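The zero-cost property this test relies on is easy to check in isolation; a small sketch with a synthetic image (the array contents are arbitrary):

import cv2
import numpy as np

# Synthetic image and a patch cropped straight out of it.
rng = np.random.default_rng(0)
image = rng.integers(0, 256, size=(100, 100), dtype=np.uint8)
patch = image[30:50, 40:60]   # rows 30-49, cols 40-59

res = cv2.matchTemplate(image, patch, cv2.TM_SQDIFF)
min_val, _, min_loc, _ = cv2.minMaxLoc(res)
assert min_loc == (40, 30)    # minMaxLoc reports (x, y); the cost there is ~0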
def matchTemplate(img_full, img_template, meth):
    w, h = img_template.shape[::-1]
    img = img_full.copy()

    # Apply template Matching
    method = eval(meth)
    res = cv2.matchTemplate(img, img_template, method)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)

    # If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take minimum
    if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
        top_left = min_loc
    else:
        top_left = max_loc
    bottom_right = (top_left[0] + w, top_left[1] + h)
    return [top_left, bottom_right]
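Since this helper resolves the method from a string with eval, callers pass names like 'cv2.TM_SQDIFF'. A usage sketch (scene and patch are hypothetical grayscale arrays, not part of the snippet):

top_left, bottom_right = matchTemplate(scene, patch, 'cv2.TM_SQDIFF')
cv2.rectangle(scene, top_left, bottom_right, 255, 2)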
def getMatchingScore(img, digit):
    # Lower TM_SQDIFF scores mean closer matches; /2000 just rescales the raw cost.
    template = cv2.imread('Templates/' + 'T' + str(digit) + '.jpg', 0)
    score = cv2.matchTemplate(img, template, cv2.TM_SQDIFF) / 2000
    return score

# Gets the best prediction of the digit in a cell using template matching
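The trailing comment hints at how the score is meant to be used; a hedged sketch (cell is a hypothetical cell image; .min() collapses the cost map in case the sizes differ):

import numpy as np

# The digit whose template gives the lowest TM_SQDIFF cost wins.
scores = [getMatchingScore(cell, d).min() for d in range(10)]
best_digit = int(np.argmin(scores))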
def find_matches(img, template_list):
    # Define an empty list to take bbox coords
    bbox_list = []
    # Iterate through the template list, read in templates one by one,
    # search the image with cv2.matchTemplate(), extract the location of
    # the best match with cv2.minMaxLoc(), and determine the bounding-box
    # corners for that match.
    method = cv2.TM_CCOEFF_NORMED
    for temp in template_list:
        tmp = mpimg.imread(temp)
        # Apply template Matching
        res = cv2.matchTemplate(img, tmp, method)
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
        w, h = (tmp.shape[1], tmp.shape[0])
        # If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take minimum
        if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
            top_left = min_loc
        else:
            top_left = max_loc
        bottom_right = (top_left[0] + w, top_left[1] + h)
        bbox_list.append((top_left, bottom_right))
    # Return the list of bounding boxes
    return bbox_list
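A usage sketch for this helper; the file names are hypothetical, and mpimg refers to matplotlib.image as in the snippet:

import cv2
import matplotlib.image as mpimg

# Hypothetical files: a search image plus cutouts used as templates.
img = mpimg.imread('scene.jpg')
bboxes = find_matches(img, ['cutout1.jpg', 'cutout2.jpg'])
annotated = img.copy()
for top_left, bottom_right in bboxes:
    cv2.rectangle(annotated, top_left, bottom_right, (0, 0, 255), 6)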
def template_match(img_master, img_slave, method='cv2.TM_CCOEFF_NORMED',
                   mlx=1, mly=1, show=True):
    # Apply image oversampling
    img_master = cv2.resize(img_master, None, fx=mlx, fy=mly,
                            interpolation=cv2.INTER_CUBIC)
    img_slave = cv2.resize(img_slave, None, fx=mlx, fy=mly,
                           interpolation=cv2.INTER_CUBIC)

    res = cv2.matchTemplate(img_slave, img_master, eval(method))
    w, h = img_master.shape[::-1]
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)

    # If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take the minimum.
    # (method is a string here, so it must be eval'd before comparing it
    # against the OpenCV constants.)
    if eval(method) in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
        top_left = min_loc
    else:
        top_left = max_loc
    bottom_right = (top_left[0] + w, top_left[1] + h)

    # Retrieve center coordinates, undoing the oversampling factor
    px = (top_left[0] + bottom_right[0]) / (2.0 * mlx)
    py = (top_left[1] + bottom_right[1]) / (2.0 * mly)

    # Scale images for visualization
    img_master_scaled = cv2.convertScaleAbs(img_master, alpha=(255.0 / 500))
    img_slave_scaled = cv2.convertScaleAbs(img_slave, alpha=(255.0 / 500))
    cv2.rectangle(img_slave_scaled, top_left, bottom_right, 255, 2 * mlx)

    if show:
        plt.figure(figsize=(20, 10))
        plt.subplot(131), plt.imshow(res, cmap='gray')
        plt.title('Matching Result'), plt.xticks([]), plt.yticks([])
        plt.subplot(132), plt.imshow(img_master_scaled, cmap='gray')
        plt.title('Detected Point'), plt.xticks([]), plt.yticks([])
        plt.subplot(133), plt.imshow(img_slave_scaled, cmap='gray')
        plt.suptitle(method)
        plt.show()

    return px, py, max_val
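The mlx/mly oversampling trades runtime for sub-pixel precision: matching on a 10x-upsampled pair and dividing the coordinates back down gives roughly 0.1-pixel resolution. A usage sketch (master_chip and slave_chip are hypothetical single-channel arrays; the alpha=255.0/500 display scaling suggests the original inputs had values up to about 500):

px, py, score = template_match(master_chip, slave_chip,
                               method='cv2.TM_CCOEFF_NORMED',
                               mlx=10, mly=10, show=False)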
def matchTemplate(self, img_full, img_template, aMeth):
    w, h = img_template.shape[::-1]
    img = img_full.copy()

    # Apply template Matching
    method = eval(aMeth)
    res = cv2.matchTemplate(img, img_template, method)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)

    # If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take minimum
    if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
        top_left = min_loc
    else:
        top_left = max_loc
    bottom_right = (top_left[0] + w, top_left[1] + h)
    return [top_left, bottom_right]
def click_image(image, notify=True):
    if notify:
        _notify("starting to click " + image)

    if isinstance(image, str):
        template = cv2.imread(image, 0)
    elif isinstance(image, PngImageFile):
        pass  # need to convert to cv2 image type
    sleep(2)

    # GET SCREENSHOT
    call(["gnome-screenshot", "--file=/tmp/beryl.png"])
    sleep(1)

    # FIND LOCATION OF NAME
    source = cv2.imread('/tmp/beryl.png', 0)
    points = []
    w, h = template.shape[::-1]
    methods = [cv2.TM_CCOEFF, cv2.TM_CCOEFF_NORMED, cv2.TM_CCORR,
               cv2.TM_CCORR_NORMED, cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]
    for method in methods:
        # Apply Template Matching
        result = cv2.matchTemplate(source.copy(), template, method)
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)

        # If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take minimum
        if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
            top_left = min_loc
        else:
            top_left = max_loc
        # bottom_right = (top_left[0] + w, top_left[1] + h)

        # (x, y) center of the matched region
        point = (top_left[0] + (float(w) / 2), top_left[1] + (float(h) / 2))
        points.append(point)

    # Keep the candidate that agrees best with the other methods' answers.
    best_point = sorted([(point, avg_distance(point, points)) for point in points],
                        key=lambda tup: tup[1])[0][0]
    click_location(best_point)

    if notify:
        _notify("finished clicking image")
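avg_distance and click_location are project helpers the snippet doesn't show. For avg_distance, a hypothetical reconstruction consistent with how it is used (the candidate with the lowest mean distance to all candidates wins, i.e. the most "agreed-upon" point across the six matching methods):

import math

def avg_distance(point, points):
    # Hypothetical sketch: mean Euclidean distance from `point` to every
    # candidate in `points` (including itself, which contributes 0).
    return sum(math.hypot(point[0] - p[0], point[1] - p[1]) for p in points) / len(points)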
def match_template_opencv(template, image, options):
    """
    Match template using the OpenCV template matching implementation.
    Limited to a maximum of 3 channels, so it is suitable for direct
    RGB or gray-scale matching.

    :param options: Other options:
        - distance: Distance measure to use (euclidean | correlation | ccoeff).
          Default: 'correlation'
        - normalize: Heatmap values will be in the range of 0 to 1.
          Default: True
        - retain_size: Whether to retain the same size as the input image.
          Default: True
    :return: Heatmap
    """
    # if image has more than 3 channels, use own implementation
    if len(image.shape) > 3:
        return match_template(template, image, options)

    op = _DEF_TM_OPT.copy()
    if options is not None:
        op.update(options)

    method = cv.TM_CCORR_NORMED
    if op['normalize'] and op['distance'] == 'euclidean':
        method = cv.TM_SQDIFF_NORMED
    elif op['distance'] == 'euclidean':
        method = cv.TM_SQDIFF
    elif op['normalize'] and op['distance'] == 'ccoeff':
        method = cv.TM_CCOEFF_NORMED
    elif op['distance'] == 'ccoeff':
        method = cv.TM_CCOEFF
    elif not op['normalize'] and op['distance'] == 'correlation':
        method = cv.TM_CCORR

    heatmap = cv.matchTemplate(image, template, method)

    # make minimum peak heatmap: SQDIFF already peaks at the minimum,
    # so invert the other methods to match that convention
    if method not in [cv.TM_SQDIFF, cv.TM_SQDIFF_NORMED]:
        heatmap = heatmap.max() - heatmap
    if op['normalize']:
        heatmap /= heatmap.max()

    # pad back to the input image's size if requested
    if op['retain_size']:
        hmap = np.ones(image.shape[:2]) * heatmap.max()
        h, w = heatmap.shape
        hmap[:h, :w] = heatmap
        heatmap = hmap

    return heatmap
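A usage sketch, assuming _DEF_TM_OPT supplies the documented defaults (that dict comes from the snippet's module and isn't shown here):

import cv2 as cv

# Request a squared-difference heatmap. The function keeps SQDIFF's
# minimum-peak convention, so the best match is the global minimum.
options = {'distance': 'euclidean', 'normalize': True, 'retain_size': False}
heatmap = match_template_opencv(template, image, options)
min_val, max_val, min_loc, max_loc = cv.minMaxLoc(heatmap)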
def find_subimage_in_array(self, sub_image, main_image, threshold=0.40,
                           value=False, debug=False):
    """
    http://docs.opencv.org/3.1.0/d4/dc6/tutorial_py_template_matching.html

    Args:
        sub_image: A numpy matrix containing the template we are trying to match
        main_image: A numpy array containing the main image we are trying to
            find the template in
        threshold: A threshold regarding how sensitive the matching should be
        value: If true, the similarity is sent back as well

    Returns:
        A list of tuples.
        If value is true, the tuples have the elements
        (left, top, right, down, similarity), where similarity is a
        measure toward one.
        Otherwise, the tuples have the elements (left, top, right, down).
    """
    # TODO: Check the test_init_wnd test for how to implement this :)
    logging.debug("Doing a template match with {} as threshold".format(threshold))
    methods = [cv2.TM_CCOEFF, cv2.TM_CCOEFF_NORMED, cv2.TM_CCORR,
               cv2.TM_CCORR_NORMED, cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]
    method = methods[0]
    h, w = sub_image.shape[0:2]
    res = cv2.matchTemplate(main_image, sub_image, method)
    loc = np.where(res >= threshold)
    locations = []
    for pt in zip(*loc[::-1]):
        if value:
            locations.append((pt[0], pt[1], pt[0] + w, pt[1] + h, res[pt[1], pt[0]]))
        else:
            locations.append((pt[0], pt[1], pt[0] + w, pt[1] + h))
    logging.debug("Found {} locations".format(len(locations)))

    if debug:
        plt.subplot(121), plt.imshow(res, cmap='gray')
        plt.title('Matching Result'), plt.xticks([]), plt.yticks([])
        plt.subplot(122), plt.imshow(main_image, cmap='gray')
        plt.title('Detected Point'), plt.xticks([]), plt.yticks([])
        for pt in zip(*loc[::-1]):
            cv2.rectangle(main_image, pt, (pt[0] + w, pt[1] + h), (255, 0, 255), 2)
        plt.imshow(main_image)
        plt.show()

    if value:
        locations.sort(reverse=True, key=operator.itemgetter(4))
    return list(map(operator.itemgetter(0, 1, 2, 3), locations))
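One thing to watch in this example: np.where(res >= threshold) keeps high-scoring points, and a fixed threshold like 0.40 only really makes sense for the normalized methods, since the raw TM_CCOEFF scores used here are unbounded. With TM_SQDIFF or TM_SQDIFF_NORMED the comparison has to flip, because low cost means a good match. A sketch of that variant (main_image and sub_image as above; the 0.05 cut-off is illustrative):

import cv2
import numpy as np

res = cv2.matchTemplate(main_image, sub_image, cv2.TM_SQDIFF_NORMED)
loc = np.where(res <= 0.05)      # keep low-cost (good) matches
for x, y in zip(*loc[::-1]):     # np.where gives (rows, cols); flip to (x, y)
    print("candidate match at", (x, y))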