The following 50 code examples, extracted from open-source Python projects, illustrate how to use cv2.TM_CCOEFF_NORMED.
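Before the examples, here is a minimal sketch of the basic call pattern (the file names are placeholders, not taken from any project below): cv2.matchTemplate slides the template over the image and returns a correlation map; with cv2.TM_CCOEFF_NORMED the scores lie in [-1, 1], the best match is the maximum, and cv2.minMaxLoc extracts it.

import cv2
import numpy as np

# Placeholder inputs; any grayscale image/template pair works.
scene = cv2.imread('scene.png', cv2.IMREAD_GRAYSCALE)
template = cv2.imread('template.png', cv2.IMREAD_GRAYSCALE)

# Correlation map: one score per possible top-left template position.
result = cv2.matchTemplate(scene, template, cv2.TM_CCOEFF_NORMED)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)

# For TM_CCOEFF_NORMED the maximum is the best match (1.0 is perfect).
h, w = template.shape
top_left = max_loc
bottom_right = (top_left[0] + w, top_left[1] + h)
print('best score %.3f at %s' % (max_val, top_left))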
def get_match_confidence(img1, img2, mask=None):
    if img1.shape != img2.shape:
        return False
    ## first try, using absdiff
    # diff = cv2.absdiff(img1, img2)
    # h, w, d = diff.shape
    # total = h*w*d
    # num = (diff<20).sum()
    # print 'is_match', total, num
    # return num > total*0.90
    if mask is not None:
        img1 = img1.copy()
        img1[mask!=0] = 0
        img2 = img2.copy()
        img2[mask!=0] = 0
    ## using match
    match = cv2.matchTemplate(img1, img2, cv2.TM_CCOEFF_NORMED)
    _, confidence, _, _ = cv2.minMaxLoc(match)
    # print confidence
    return confidence
def multiple_template_match(self, feature, scene, roi=None, scale=None,
                            min_scale=0.5, max_scale=1.0, max_distance=14,
                            min_corr=0.8, debug=False,
                            threshold_min=50, threshold_max=200):
    if roi is not None:
        scene = scene[roi.top:(roi.top + roi.height),
                      roi.left:(roi.left + roi.width)]

    if not scale:
        scale = self.find_best_scale(feature, scene, min_scale=min_scale,
                                     max_scale=max_scale, min_corr=min_corr)
    peaks = []

    if scale:
        scaled_feature = cv2.resize(feature, (0, 0), fx=scale, fy=scale)

        canny_scene = cv2.Canny(scene, threshold_min, threshold_max)
        canny_feature = cv2.Canny(scaled_feature, threshold_min, threshold_max)

        # Threshold for peaks.
        corr_map = cv2.matchTemplate(canny_scene, canny_feature,
                                     cv2.TM_CCOEFF_NORMED)
        _, max_corr, _, max_loc = cv2.minMaxLoc(corr_map)

        good_points = list(zip(*np.where(corr_map >= max_corr - self.tolerance)))
        if debug:
            print(max_corr, good_points)

        clusters = self.get_clusters(good_points, max_distance=max_distance)
        peaks = [max([(pt, corr_map[pt]) for pt in cluster],
                     key=lambda pt: pt[1])
                 for cluster in clusters]

    return (scale, peaks)
def find_best_scale(self, feature, scene, min_scale=0.5, max_scale=1.0,
                    scale_delta=0.03, min_corr=0.8):
    best_corr = 0
    best_scale = 0

    for scale in np.arange(min_scale, max_scale + scale_delta, scale_delta):
        scaled_feature = cv2.resize(feature, (0, 0), fx=scale, fy=scale)

        result = cv2.matchTemplate(scene, scaled_feature, cv2.TM_CCOEFF_NORMED)
        _, max_val, _, _ = cv2.minMaxLoc(result)

        if max_val > best_corr:
            best_corr = max_val
            best_scale = scale

    if best_corr > min_corr:
        return best_scale
    else:
        return None
def find_best_scale(feature, scene, min_scale=0.5, max_scale=1.0,
                    scale_delta=0.02, min_corr=0.8):
    best_corr = 0
    best_scale = 0

    for scale in np.arange(min_scale, max_scale + scale_delta, scale_delta):
        scaled_feature = cv2.resize(feature, (0, 0), fx=scale, fy=scale)

        result = cv2.matchTemplate(scene, scaled_feature, cv2.TM_CCOEFF_NORMED)
        _, max_val, _, _ = cv2.minMaxLoc(result)

        if max_val > best_corr:
            best_corr = max_val
            best_scale = scale

    if best_corr > min_corr:
        return best_scale
    else:
        return None
def find_float(img_name):
    print('Looking for float')
    # todo: maybe make some universal float without background?
    for x in range(0, 7):
        template = cv2.imread('var/fishing_float_' + str(x) + '.png', 0)
        img_rgb = cv2.imread(img_name)
        img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
        # print('got images')
        w, h = template.shape[::-1]
        res = cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED)
        threshold = 0.6
        loc = np.where(res >= threshold)
        for pt in zip(*loc[::-1]):
            cv2.rectangle(img_rgb, pt, (pt[0] + w, pt[1] + h), (0, 0, 255), 2)
        if loc[0].any():
            print('Found ' + str(x) + ' float')
            if dev:
                cv2.imwrite('var/fishing_session_' + str(int(time.time())) +
                            '_success.png', img_rgb)
            return (loc[1][0] + w / 2) / 2, (loc[0][0] + h / 2) / 2
def play_game(get_command_callback: Callable[[int, int, int], str]) -> int:
    with mss() as screenshotter:
        get_game_landscape_and_set_focus_or_die(screenshotter)
        reset_game()
        landscape = get_game_landscape_and_set_focus_or_die(screenshotter, .95)

        start_game()
        gameover_template = cv2.imread(os.path.join('templates', 'dino_gameover.png'), 0)
        start = time.time()
        last_distance = landscape['width']
        x1, x2, y1, y2 = compute_region_of_interest(landscape)
        speed = 0
        last_compute_speed = time.time()
        last_speeds = [3] * 30
        last_command_time = time.time()

        while True:
            buffer = screenshotter.grab(landscape)
            image = Image.frombytes('RGB', buffer.size, buffer.rgb).convert('L')
            image = np.array(image)
            image += np.abs(247 - image[0, x2])
            roi = image[y1:y2, x1:x2]
            score = int(time.time() - start)
            distance, size = compute_distance_and_size(roi, x2)
            speed = compute_speed(distance, last_distance, speed, last_speeds,
                                  last_compute_speed)
            last_compute_speed = time.time()
            # Check GAME OVER
            if distance == last_distance or distance == 0:
                res = cv2.matchTemplate(image, gameover_template, cv2.TM_CCOEFF_NORMED)
                if np.where(res >= 0.7)[0].size:  # any location above threshold
                    reset_game()
                    return score
            last_distance = distance
            if time.time() - last_command_time < 0.6:
                continue
            command = get_command_callback(distance, size, speed)
            if command:
                last_command_time = time.time()
                pyautogui.press(command)
def find_game_position(screenshotter, threshold) -> Dict:
    dino_template = cv2.imread(os.path.join('templates', 'dino.png'), 0)
    w, h = dino_template.shape[::-1]
    landscape_template = cv2.imread(os.path.join('templates', 'dino_landscape.png'), 0)
    lw, lh = landscape_template.shape[::-1]
    monitor = screenshotter.monitors[0]
    buffer = screenshotter.grab(monitor)
    image = Image.frombytes('RGB', buffer.size, buffer.rgb).convert('L')
    image = np.array(image)
    res = cv2.matchTemplate(image, dino_template, cv2.TM_CCOEFF_NORMED)
    loc = np.where(res >= threshold)
    if len(loc[0]):
        pt = next(zip(*loc[::-1]))
        return dict(monitor, height=lh, left=pt[0], top=pt[1] - lh + h, width=lw)
    return {}
def match_template(screenshot, template):
    # Perform match template calculation
    matches = cv2.matchTemplate(screenshot, template, cv2.TM_CCOEFF_NORMED)

    # Survey results
    (min_val, max_val, min_loc, max_loc) = cv2.minMaxLoc(matches)

    # Load template size
    (template_height, template_width) = template.shape[:2]

    return {
        "x1": max_loc[0],
        "y1": max_loc[1],
        "x2": max_loc[0] + template_width,
        "y2": max_loc[1] + template_height,
        "center": {
            "x": max_loc[0] + (template_width / 2),
            "y": max_loc[1] + (template_height / 2)
        },
        "score": max_val
    }
def __get_uniq_faces_curr_frame_template_match(self, frame_id, frame_prev, faces_roi):
    logger.info("[{0}] Face Similarity: # of faces in current frame - {1}"
                .format(frame_id, len(faces_roi)))

    # First Time
    if frame_prev.size == 0:
        return len(faces_roi)

    uniq_faces_curr_frame = 0

    for template_roi in faces_roi:
        # Apply template Matching
        res = cv2.matchTemplate(frame_prev, template_roi, cv2.TM_CCOEFF_NORMED)
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
        logger.info("[{0}] {1},{2},{3},{4}".format(frame_id, min_val, max_val,
                                                   min_loc, max_loc))

    logger.info("[{0}] Total Unique Faces in Current Frame: {1}"
                .format(frame_id, uniq_faces_curr_frame))
    return uniq_faces_curr_frame
def __init__(self):
    t = ImageGrab.grab().convert("RGB")
    self.screen = cv2.cvtColor(numpy.array(t), cv2.COLOR_RGB2BGR)
    self.ultLoader = ImageLoader('image/ult/')

    if self.have('topleft'):
        tl = self._imageLoader.get('topleft')
        res = cv2.matchTemplate(self.screen, tl, cv2.TM_CCOEFF_NORMED)
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
        x1, y1 = max_loc

        rd = self._imageLoader.get('rightdown')
        res = cv2.matchTemplate(self.screen, rd, cv2.TM_CCOEFF_NORMED)
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
        x2, y2 = max_loc

        # default 989
        GameStatus().y = y2 - y1
        GameStatus().use_Droid4X = True
def find_list(self, name):
    cards = []
    res = cv2.matchTemplate(self.screen, self._imageLoader.get(name),
                            cv2.TM_CCOEFF_NORMED)
    threshold = 0.8
    loc = numpy.where(res >= threshold)
    x = 0
    t = sorted(zip(*loc[::-1]))
    for pt in t:
        # keep one hit per card: skip points within 100 px (in x) of the
        # previously accepted match
        if abs(x - pt[0]) > 100 or x == 0:
            x = pt[0]
            cards.append((pt[0], pt[1]))
        else:
            continue
    self.log(name + ': ' + str(len(cards)))
    return cards
def detect(self, image):
    # convert image to grayscale
    image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # apply template matching
    result = cv2.matchTemplate(image_gray, self.template, cv2.TM_CCOEFF_NORMED)

    # obtain locations where the threshold is met
    locations = np.where(result >= self.THRESHOLD)

    for item in locations:
        if len(item) == 0:
            return None

    return locations
def check_list(self):
    items_dict = imd.ImageStorage()
    items_dict = items_dict.pickled_dict
    RS.press_button('equipment')
    time.sleep(1)
    for key in items_dict.keys():
        template = items_dict[key]
        # save for DEBUG
        # cv2.imwrite('debug_template_file', template_)
        w, h = template.shape[::-1]
        pattern = RS.get_bag('only', 'gray')
        res = cv2.matchTemplate(pattern, template, cv2.TM_CCOEFF_NORMED)
        threshold = .8  # default is .8
        loc = np.where(res >= threshold)
        for pt in zip(*loc[::-1]):  # goes through each found image
            print('{} found'.format(key))
            break
        else:
            print('{} not found'.format(key))
def this(img_pat, img_temp):
    """pass img_pat as a cv2 image format, img_temp as a file
    Passed Function to do w/e after finding img_temp"""
    cwd = os.getcwd()
    if cwd not in img_temp:
        img_temp = cwd + img_temp
    if '.png' not in img_temp:
        img_temp = img_temp + '.png'
    # print for DEBUG
    # print(img_temp)
    img_temp = cv2.imread(img_temp, 0)
    # save for DEBUG
    # cv2.imwrite('img_temp', img_temp)
    w, h = img_temp.shape[::-1]
    res = cv2.matchTemplate(img_pat, img_temp, cv2.TM_CCOEFF_NORMED)
    threshold = .8  # default is .8
    loc = np.where(res >= threshold)
    return loc, w, h
def images(img_pat, img_temp, x, y, func):
    w, h = img_temp.shape[::-1]
    try:
        res = cv2.matchTemplate(img_temp, img_pat, cv2.TM_CCOEFF_NORMED)
    except Exception as e:
        print("cannot match")
        print(e)
        return 1
    threshold = .9  # default is .8
    loc = np.where(res >= threshold)
    for pt in zip(*loc[::-1]):  # goes through each found image
        func(img_pat, x, y, pt, w, h)
        return 0
    return 1
    # return loc to be iterable outside the function
    # also sometimes width and height of image is needed
def exists(image, template, thresh):
    """
    Returns True if template is in image with probability of at least thresh
    :param image:
    :param template:
    :param thresh:
    :return:
    """
    digit_res = cv2.matchTemplate(image, template, cv2.TM_CCOEFF_NORMED)
    loc = np.where(digit_res >= thresh)

    if len(loc[-1]) == 0:
        return False

    for pt in zip(*loc[::-1]):
        if digit_res[pt[1]][pt[0]] == 1:
            return False

    return True
def identify_summons(image_path):
    import cv2
    import numpy as np

    image = cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2GRAY)
    summons = []
    points = 0

    for file_name, (point_value, actual_name) in possible_summons.items():
        template = cv2.imread(os.path.join('screenshots', 'summons', file_name + '.png'),
                              cv2.IMREAD_GRAYSCALE)
        res = cv2.matchTemplate(image, template, cv2.TM_CCOEFF_NORMED)
        loc = np.where(res >= CLOSENESS_THRESHOLD)

        for pt in zip(*loc[::-1]):
            # Due to weird behaviour, only add one instance of each summon
            if actual_name in summons:
                continue
            summons.append(actual_name)
            points += point_value

    return (summons, points)
def image_is_on_screen(template_name):
    template = cv2.imread(os.path.join('screenshots', template_name + '.png'),
                          cv2.IMREAD_GRAYSCALE)
    image = cv2.cvtColor(
        np.array(pyautogui.screenshot(region=(0, 0, 1300, 750))),
        cv2.COLOR_BGR2GRAY)
    res = cv2.matchTemplate(image, template, cv2.TM_CCOEFF_NORMED)
    loc = np.where(res >= CLOSENESS_THRESHOLD)

    # The loop body only executes if at least one location met the threshold
    for pt in zip(*loc[::-1]):
        return True
    return False
def detect(self, template):
    template = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY)
    if self.is_cards_screen:
        template = self.scale_template_for_cards_screen(template)

    result = cv2.matchTemplate(self.original, template, cv2.TM_CCOEFF_NORMED)
    loc = np.where(result >= self.threshold)
    points = list(zip(*loc[::-1]))  # list() so len() works under Python 3

    if len(points) > 0:
        return HeroDetector.combine_points(points)
    return None

# Scale template down if we're on the game-over screen since the hero
# portraits are smaller there than during the game.
def recognizeDigit(digit, method=REC_METHOD_TEMPLATE_MATCHING, threshold=55):
    """
    Finds the best match for the given digit (RGB or gray color scheme),
    and returns the digit and the match percentage as integers.
    @threshold percentage of similarity
    """
    __readDigitTemplates()
    digit = digit.copy()
    if digit.shape[2] == 3:
        digit = cv2.cvtColor(digit, cv2.COLOR_RGB2GRAY)
    ret, digit = cv2.threshold(digit, 90, 255, cv2.THRESH_BINARY_INV)
    bestDigit = -1
    if method == REC_METHOD_TEMPLATE_MATCHING:
        bestMatch = None
        for i in range(len(__DIGIT_TEMPLATES)):
            template = __DIGIT_TEMPLATES[i].copy()
            if digit.shape[1] < template.shape[1]:
                template = cv2.resize(template, (digit.shape[1], digit.shape[0]))
            else:
                digit = cv2.resize(digit, (template.shape[1], template.shape[0]))
            result = cv2.matchTemplate(digit, template, cv2.TM_CCORR_NORMED)  # cv2.TM_CCOEFF_NORMED
            (_, max_val, _, max_loc) = cv2.minMaxLoc(result)
            if bestMatch is None or max_val > bestMatch:
                bestMatch = max_val
                bestDigit = i
                print("New Best Match:", bestMatch, bestDigit)

    if (bestMatch * 100) >= threshold:
        return (bestDigit, bestMatch * 100)
    return (-1, 0)
def find_matches(img, template_list):
    # Define an empty list to take bbox coords
    bbox_list = []
    # Iterate through the template list, read in templates one by one, use
    # cv2.matchTemplate() to search the image, and cv2.minMaxLoc() to extract
    # the location of the best match. Determine bounding box corners for each
    # match and return the list of bounding boxes.
    method = cv2.TM_CCOEFF_NORMED
    for temp in template_list:
        tmp = mpimg.imread(temp)
        # Apply template Matching
        res = cv2.matchTemplate(img, tmp, method)
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
        w, h = (tmp.shape[1], tmp.shape[0])
        # If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take minimum
        if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
            top_left = min_loc
        else:
            top_left = max_loc
        bottom_right = (top_left[0] + w, top_left[1] + h)
        bbox_list.append((top_left, bottom_right))
    return bbox_list
def test_find_scene():
    scenes = {}
    for s in os.listdir('txxscene'):
        if '-' in s:
            continue
        i = cv2.imread(os.path.join('txxscene', s), cv2.IMREAD_GRAYSCALE)
        scenes[s] = i

    # names = [os.path.join('scene', c) for c in os.listdir('scene')]
    imgs = {}
    for n in os.listdir('scene'):
        i = cv2.imread(os.path.join('scene', n), cv2.IMREAD_GRAYSCALE)
        i = cv2.resize(i, (960, 540))
        imgs[n] = i

    for name, img in imgs.items():
        for scene, tmpl in scenes.items():
            res = cv2.matchTemplate(img, tmpl, cv2.TM_CCOEFF_NORMED)
            min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
            if max_val < 0.6:
                continue
            x, y = max_loc
            h, w = tmpl.shape
            cv2.rectangle(img, (x, y), (x+w, y+h), 255, 2)
            print(name, scene, max_val, min_val)
            cv2.imshow('found', img)
            cv2.waitKey()
def find_match(img, tmpl, rect=None, mask=None):
    if rect is not None:
        h, w = img.shape[:2]
        x, y, x1, y1 = rect
        if x1 > w or y1 > h:
            return 0, None
        img = img[y:y1, x:x1, :]

    if mask is not None:
        img = img.copy()
        img[mask!=0] = 0
        tmpl = tmpl.copy()
        tmpl[mask!=0] = 0

    s_bgr = cv2.split(tmpl)  # Blue Green Red
    i_bgr = cv2.split(img)

    weight = (0.3, 0.3, 0.4)
    resbgr = [0, 0, 0]
    for i in range(3):  # bgr
        resbgr[i] = cv2.matchTemplate(i_bgr[i], s_bgr[i], cv2.TM_CCOEFF_NORMED)
    match = resbgr[0]*weight[0] + resbgr[1]*weight[1] + resbgr[2]*weight[2]

    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(match)
    confidence = max_val
    x, y = max_loc
    h, w = tmpl.shape[:2]
    if rect is None:
        rect = (x, y, x+w, y+h)
    # cv2.rectangle(img, (x,y), (x+w,y+h), (0,255,0), 2)
    # cv2.imshow('test', img)
    # cv2.waitKey(20)
    return confidence, rect
def match(self, templateimage, threshold=0.8):
    image = cv2.imread(self.sourceimage)
    template = cv2.imread(templateimage)
    result = cv2.matchTemplate(image, template, cv2.TM_CCOEFF_NORMED)
    similarity = cv2.minMaxLoc(result)[1]
    if similarity < threshold:
        return similarity
    else:
        return np.unravel_index(result.argmax(), result.shape)
def fit(img, templates, start_percent, stop_percent, threshold):
    img_width, img_height = img.shape[::-1]
    best_location_count = -1
    best_locations = []
    best_scale = 1

    plt.axis([0, 2, 0, 1])
    plt.show(block=False)

    x = []
    y = []
    for scale in [i/100.0 for i in range(start_percent, stop_percent + 1, 3)]:
        locations = []
        location_count = 0
        for template in templates:
            template = cv2.resize(template, None,
                                  fx=scale, fy=scale,
                                  interpolation=cv2.INTER_CUBIC)
            result = cv2.matchTemplate(img, template, cv2.TM_CCOEFF_NORMED)
            result = np.where(result >= threshold)
            location_count += len(result[0])
            locations += [result]
        print("scale: {0}, hits: {1}".format(scale, location_count))
        x.append(location_count)
        y.append(scale)
        plt.plot(y, x)
        plt.pause(0.00001)
        if location_count > best_location_count:
            best_location_count = location_count
            best_locations = locations
            best_scale = scale
            plt.axis([0, 2, 0, best_location_count])
        elif location_count < best_location_count:
            pass
    plt.close()
    return best_locations, best_scale
def match_template_mask(image, template, mask=None, method=None, sigma=0.33):
    """Match template against image applying mask to template using method.
    Method can be either of (None, 'laplacian', 'sobel', 'scharr', 'prewitt',
    'roberts', 'canny').
    Returns locations to look for max values."""
    if mask is not None:
        if method:
            kernel = np.ones((3, 3), np.uint8)
            mask = cv2.erode(mask, kernel)
            if method == 'laplacian':
                # use CV_64F to not lose edges, convert to uint8 afterwards
                edge_image = np.uint8(np.absolute(
                    cv2.Laplacian(image, cv2.CV_64F)))
                edge_template = np.uint8(np.absolute(
                    cv2.Laplacian(template, cv2.CV_64F)
                ))
            elif method in ('sobel', 'scharr', 'prewitt', 'roberts'):
                filter_func = getattr(skfilters, method)
                edge_image = filter_func(image)
                edge_template = filter_func(template)
                edge_image = convert(edge_image)
                edge_template = convert(edge_template)
            else:  # method == 'canny'
                values = np.hstack([image.ravel(), template.ravel()])
                median = np.median(values)
                lower = int(max(0, (1.0 - sigma) * median))
                upper = int(min(255, (1.0 + sigma) * median))
                edge_image = cv2.Canny(image, lower, upper)
                edge_template = cv2.Canny(template, lower, upper)
            results = cv2.matchTemplate(edge_image, edge_template & mask,
                                        cv2.TM_CCOEFF_NORMED)
        else:
            results = cv2.matchTemplate(image, template,
                                        cv2.TM_CCOEFF_NORMED, mask)
    else:
        results = cv2.matchTemplate(image, template, cv2.TM_CCOEFF_NORMED)
    return results
def MatchTemplate(template, target):
    """Returns match score for given template"""
    res = cv2.matchTemplate(target, template, cv2.TM_CCOEFF_NORMED)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    return max_val
def matchTemplate(self, img, template):
    res = cv2.matchTemplate(cv2.cvtColor(img, cv2.COLOR_RGB2GRAY), template,
                            cv2.TM_CCOEFF_NORMED)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    top_left = max_loc
    bottom_right = (top_left[0] + template.shape[1],
                    top_left[1] + template.shape[0])
    return top_left, bottom_right
def click_on(self, name, repeat=False, loader=_imageLoader):
    if GameStatus().game_stage == GameStage.Stopped:
        return
    self.log('try click ' + name)
    p = loader.get(name)
    max_val = 0
    x, y = 0, 0
    while max_val < 0.8:
        if GameStatus().game_stage == GameStage.Stopped:
            return
        self.capture()
        res = cv2.matchTemplate(self.screen, p, cv2.TM_CCOEFF_NORMED)
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
        self.log(name + ' ' + str(max_val))
        x, y = max_loc
        time.sleep(self._delay)

    m, n, q = p.shape
    x += n / 2
    y += m / 2
    self._click(x, y)

    max_val = 1 if repeat else 0
    while max_val > 0.8:
        if GameStatus().game_stage == GameStage.Stopped:
            return
        time.sleep(1)
        self.capture()
        res = cv2.matchTemplate(self.screen, p, cv2.TM_CCOEFF_NORMED)
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
        if max_val > 0.8:
            self._click(x, y)
def chances_of(self, name, loader=_imageLoader):
    self.capture()
    p = loader.get(name)
    res = cv2.matchTemplate(self.screen, p, cv2.TM_CCOEFF_NORMED)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    self.log('chances of ' + name + ': ' + str(max_val))
    return max_val
def findAround(pic, pat, xy=None, r=None):
    """
    find image pattern ``pat`` in ``pic[x +/- r, y +/- r]``.
    if xy is None, consider the whole picture.
    """
    if xy and r:
        h, w = pat.shape[:2]
        x, y = xy
        pic = pic[y-r : y+h+r, x-r : x+w+r]
    matches = cv2.matchTemplate(pat, pic, cv2.TM_CCOEFF_NORMED)
    yf, xf = np.unravel_index(matches.argmax(), matches.shape)
    return (x-r+xf, y-r+yf) if (xy and r) else (xf, yf)
def find_locs(img_gray_on):
    img_gray_off = img_gray_on.copy()
    res_on = cv2.matchTemplate(img_gray_on, templates["on"], cv2.TM_CCOEFF_NORMED)
    res_off = cv2.matchTemplate(img_gray_off, templates["off"], cv2.TM_CCOEFF_NORMED)
    loc_on = np.where(res_on >= threshold)
    loc_off = np.where(res_off >= threshold)
    locs = {"on": list(zip(*loc_on[::-1])),
            "off": list(zip(*loc_off[::-1]))}
    return locs
def find_switch_box():
    img_gray = grab_screen_gray()
    res = cv2.matchTemplate(img_gray, templates["switch_box"], cv2.TM_CCOEFF_NORMED)
    loc = np.where(res >= threshold)
    return list(zip(*loc[::-1]))
def find_switch_spaces(lb):
    img_gray = grab_screen_gray()
    res = cv2.matchTemplate(img_gray, templates["switch_box"], cv2.TM_CCOEFF_NORMED)
    locs = np.where(res >= threshold)
    try:
        tl_pt_a = list(zip(*locs[::-1]))[0]
    except IndexError:
        return -1
    tl_pt_e = (tl_pt_a[0], tl_pt_a[1] + h_gap)

    switches = lb.switches
    # Assumes that the switches are in two rows
    half_num_switches = lb.num_switches // 2

    switch_spaces = {i: (0, 0) for i in lb.switches}
    for i in range(0, half_num_switches):
        tl_pt = tl_pt_a[0] + (w_sw * i) + (w_gap * i)
        switch_spaces[lb.switches[i]] = (tl_pt, tl_pt_a[1])
    j = 0
    for i in range(half_num_switches, lb.num_switches):
        tl_pt = tl_pt_e[0] + (w_sw * j) + (w_gap * j)
        switch_spaces[switches[i]] = (tl_pt, tl_pt_e[1])
        j += 1
    return switch_spaces
def lb_open():
    img_gray = grab_screen_gray()
    res = cv2.matchTemplate(img_gray, templates["switch_box"], cv2.TM_CCOEFF_NORMED)
    locs = np.where(res >= threshold)
    return len(locs[0]) == 1
def image_to_digit_list(image, digit_templates, thresh):
    """
    Convert an image to a list of digits.
    :param image: The part of the image containing the number.
    :param digit_templates: Images of all the digits.
    :param thresh: The threshold required to detect an image.
    :return: a list of digits with data about the probability they were
        found in the image and their position.
    """
    # Initialize variables.
    digit_list = []
    digit = 0

    # Convert the values from the 'height' and 'velocity' images.
    for digit_image in digit_templates:
        # Get a matrix of values containing the probability the pixel is
        # the top-left part of the template.
        digit_res = cv2.matchTemplate(image, digit_image, cv2.TM_CCOEFF_NORMED)

        # Get a list of all the pixels that have a probability >= to the thresh.
        loc = np.where(digit_res >= thresh)

        # Create a list that contains the x position and the digit.
        for pt in zip(*loc[::-1]):
            digit_list.append((digit, pt[0], digit_res[pt[1]][pt[0]]))
        digit += 1
    return digit_list
def calculate_score(self, img):
    mf = cv2.matchTemplate(img.astype('float32'), self._template,
                           cv2.TM_CCOEFF_NORMED)
    min_value, max_value, min_loc, max_loc = cv2.minMaxLoc(mf)
    return max_value
def getMatches(image, template, threshold):
    result = cv2.matchTemplate(image, template, cv2.TM_CCOEFF_NORMED)
    # screen.showImage(result)
    loc = np.where(result >= threshold)
    results = list(zip(*loc[::-1]))  # list() so the result is reusable under Python 3
    return results

# Highlight regions of interest in an image
def test_find_scene_by_tree():
    scenes = build_scene_tree()

    # names = [os.path.join('scene', c) for c in os.listdir('scene')]
    imgs = {}
    for n in os.listdir('scene'):
        i = cv2.imread(os.path.join('scene', n))  # , cv2.IMREAD_GRAYSCALE)
        i = cv2.resize(i, (960, 540))
        imgs[n] = i

    def find_match(node, img):
        # for root node
        if node.parent is None:
            for k, v in node.items():
                res = find_match(v, img)
                if res is not None:
                    return res
            return node

        # find in this node
        if node.tmpl is not None:
            s_bgr = cv2.split(node.tmpl)  # Blue Green Red
            i_bgr = cv2.split(img)
            weight = (0.3, 0.3, 0.4)
            resbgr = [0, 0, 0]
            for i in range(3):  # bgr
                resbgr[i] = cv2.matchTemplate(i_bgr[i], s_bgr[i], cv2.TM_CCOEFF_NORMED)
            match = resbgr[0]*weight[0] + resbgr[1]*weight[1] + resbgr[2]*weight[2]
            # match = cv2.matchTemplate(img, node.tmpl, cv2.TM_CCOEFF_NORMED)
            min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(match)
            # found!
            if max_val > 0.7:
                x, y = max_loc
                h, w = node.tmpl.shape[:2]
                cv2.rectangle(img, (x, y), (x+w, y+h), 255, 2)

        # find in children
        for k, v in node.items():
            res = find_match(v, img)
            if res is not None:
                return res
        return node

    for name, img in imgs.items():
        cur = find_match(scenes, img)
        print('%20s %s' % (name, cur))
        cv2.imshow('img', img)
        cv2.waitKey()
def match(self, im0, im1, hm0, hm1):
    viz = False
    mask0 = self.BG0.segment(im0)
    mask1 = self.BG1.segment(im1)
    im0 = im0 * (mask0 > 1e-10).astype('uint8')[:, :, np.newaxis]
    im1 = im1 * (mask1 > 1e-10).astype('uint8')[:, :, np.newaxis]

    if viz:
        viz0 = np.copy(im0)
        viz1 = np.copy(im1)

    pts14 = []
    for chan in range(14):
        h0 = cv2.resize(hm0[:, :, chan], (ORIG_SIZE, ORIG_SIZE))
        h1 = cv2.resize(hm1[:, :, chan], (ORIG_SIZE, ORIG_SIZE))
        y0, x0 = argmax_2d(h0)
        y1, x1 = argmax_2d(h1)

        target = take_patch(im0, y0, x0, PATCH_SIZE)
        region = take_patch(im1, y1, x1, REGION_SIZE)

        res = cv2.matchTemplate(region, target, cv2.TM_CCOEFF_NORMED)
        _, _, _, top_left = cv2.minMaxLoc(res)
        top_left = top_left[::-1]
        center_in_region = (top_left[0] + PATCH_SIZE, top_left[1] + PATCH_SIZE)
        center_in_im1 = (center_in_region[0] + y1 - REGION_SIZE,
                         center_in_region[1] + x1 - REGION_SIZE)
        if viz:
            cv2.circle(viz0, (x0, y0), 3, (0, 0, 255), -1)
            cv2.circle(viz1, tuple(center_in_im1[::-1]), 3, (0, 0, 255), -1)
        pts14.append([x0, y0, center_in_im1[1], center_in_im1[0]])

    if viz:
        mask0 = cv2.cvtColor(mask0, cv2.COLOR_GRAY2RGB).astype('uint8')
        mask1 = cv2.cvtColor(mask1, cv2.COLOR_GRAY2RGB).astype('uint8')
        viz = np.concatenate((mask0, viz0, viz1, mask1), axis=1)
        cv2.imshow("v", viz)
        cv2.waitKey(1)
        return viz, np.array(pts14)
    return np.array(pts14)

    # rv = np.copy(region)
    # rv[center_in_region[0], center_in_region[1]] = (0, 0, 255)
    # tv = cv2.resize(target, tuple(region.shape[:2][::-1]))
    # hv = np.zeros((region.shape), dtype='float32')
    # res = res - res.min()
    # res = res / res.max() * 255
    # res = cv2.cvtColor(res, cv2.COLOR_GRAY2RGB)
    # hv[PATCH_SIZE:PATCH_SIZE+res.shape[0], PATCH_SIZE:PATCH_SIZE+res.shape[1], :] = res
    # region = np.concatenate((region, rv, tv, hv), axis=1)
    # cv2.imwrite("patchmatch/region{}.png".format(chan), region)
def trackObjects(self):
    for area in self.trackedAreasList:
        # Template matching
        gray = cv2.cvtColor(self.processedFrame, cv2.COLOR_BGR2GRAY)
        templ = area.getGrayStackAve()
        cc = cv2.matchTemplate(gray, templ, cv2.TM_CCOEFF_NORMED)
        cc = cc * cc * cc * cc
        _, cc = cv2.threshold(cc, 0.1, 0, cv2.THRESH_TOZERO)
        cc8 = cv2.normalize(cc, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U)
        mask = np.zeros_like(cc8)

        # Search match within template region
        mcorn = area.getEnlargedCorners(0)  # If not 0, enlarge the search
        cv2.rectangle(mask, mcorn[0], mcorn[1], 255, -1)
        _, _, _, mx = cv2.minMaxLoc(cc8, mask)

        # kp = area.getKalmanPredict()
        # area.updateWindow(kp)
        # area.setTemplate(self.processedFrame)

        # Prevent large spatial jumps
        (c, r, _, _) = area.getcrwh()
        jump = 10
        if abs(c - mx[0]) < jump and abs(r - mx[1]) < jump:
            # area.setKalmanCorrect(mx)
            area.updateWindow(mx)
        else:
            # area.setKalmanCorrect((c, r))
            area.updateWindow((c, r))
        area.setTemplate(self.processedFrame)

        # Show the template stack
        if self.showTemplate is True:
            cv2.imshow('Stack: ' + str(area), area.getStack())
        else:
            try:
                cv2.destroyWindow('Stack: ' + str(area))
            except:
                pass

        # Show the matching results
        if self.showMatch is True:
            cv2.rectangle(cc8, mcorn[0], mcorn[1], 255, 1)
            cv2.circle(cc8, mx, 5, 255, 1)
            cv2.imshow('Match: ' + str(area), cc8)
        else:
            try:
                cv2.destroyWindow('Match: ' + str(area))
            except:
                pass

        # Draw the tracked area on the image
        corn = area.getCorners()
        cv2.rectangle(self.workingFrame, corn[0], corn[1], (0, 255, 0), 1)
        # self.showFrame()
        # raw_input('wait')
def detect_match_chunks(self, max_error=.06):
    percent = cv2.imread("assets/pct.png")
    corr_series = []

    for (time, scene) in self.sample_frames(interval=self.polling_interval):
        cv2.imwrite("scene.png", scene)
        scene = cv2.imread("scene.png")

        scaled_percent = cv2.resize(percent, (0, 0), fx=self.scale, fy=self.scale)
        scaled_percent = cv2.Canny(scaled_percent, 50, 200)

        percent_corrs = []
        for port_number, roi in enumerate(self.ports):
            if roi is not None:
                scene_roi = scene[roi.top:(roi.top + roi.height),
                                  roi.left:(roi.left + roi.width)]
                scene_roi = cv2.Canny(scene_roi, 50, 200)

                corr_map = cv2.matchTemplate(scene_roi, scaled_percent,
                                             cv2.TM_CCOEFF_NORMED)
                _, max_corr, _, max_loc = cv2.minMaxLoc(corr_map)
                percent_corrs.append(max_corr)

        point = [time, max(percent_corrs)]
        corr_series.append(point)

    corr_series = np.array(corr_series)

    # Note: pd.rolling_median was removed in newer pandas; there, use
    # pd.Series(...).rolling(...).median() instead.
    medians = pd.rolling_median(corr_series[:, 1],
                                self.min_gap // self.polling_interval,
                                center=True)[2:-2]

    clusters = DBSCAN(eps=0.03, min_samples=10).fit(medians.reshape(-1, 1))
    dataframe = list(zip(corr_series[:, 0][2:-2], medians, clusters.labels_))
    labels = list(set(x[2] for x in dataframe))

    cluster_means = [sum(cluster) / len(cluster) for cluster in
                     [[x[1] for x in dataframe if x[2] == label]
                      for label in labels]]
    cluster_means = list(zip(labels, cluster_means))

    game_label = max(cluster_means, key=lambda x: x[1])[0]
    game_groups = [(k, list(v)) for k, v in groupby(dataframe, lambda pt: pt[2])]
    games = [[v[0][0], v[-1][0]] for k, v in game_groups if k == game_label]

    return games
def __detect_match_chunks(self, max_error=.04):
    percent = cv2.imread("assets/pct.png")
    corr_series = []

    for (time, scene) in spaced_frames(self, interval=self.polling_interval):
        cv2.imwrite("scene.png", scene)
        scene = cv2.imread("scene.png")

        scaled_percent = cv2.resize(percent, (0, 0), fx=self.scale, fy=self.scale)
        scaled_percent = cv2.Canny(scaled_percent, 50, 200)

        percent_corrs = []
        for port_number, roi in enumerate(self.ports):
            if roi is not None:
                scene_roi = scene[roi.top:roi.bottom, roi.left:roi.right]
                scene_roi = cv2.Canny(scene_roi, 50, 200)

                corr_map = cv2.matchTemplate(scene_roi, scaled_percent,
                                             cv2.TM_CCOEFF_NORMED)
                _, max_corr, _, max_loc = cv2.minMaxLoc(corr_map)
                percent_corrs.append(max_corr)

        point = [time, max(percent_corrs)]
        corr_series.append(point)

    corr_series = np.array(corr_series)

    def moving_average(series, n=5):
        return np.convolve(series, np.ones((n,)) / n, mode='valid')

    medians = rolling_median(corr_series[:, 1],
                             self.min_gap // self.polling_interval,
                             center=True)[2:-2]

    clusters = DBSCAN(eps=0.05, min_samples=10).fit(medians.reshape(-1, 1))

    # Note: the remainder of this snippet references `kmeans`, which is not
    # defined here (and DBSCAN has no cluster_centers_); it appears to be
    # left over from an earlier k-means version of this code.
    centers = kmeans.cluster_centers_
    points = zip([time + (self.min_gap / 2) for time, corr in corr_series],
                 kmeans.labels_)

    # Throw out the lowest cluster
    groups = [(k, list(v)) for k, v in
              groupby(points, lambda pt: centers[pt[1]] > max(min(centers), .2))]
    games = [[v[0][0], v[-1][0]] for k, v in groups if k]

    return games
def matchAB(fileA, fileB):
    """Compare image fileA with image fileB and highlight the differences."""
    # read the two images
    imgA = cv2.imread(fileA)
    imgB = cv2.imread(fileB)

    # convert to grayscale
    grayA = cv2.cvtColor(imgA, cv2.COLOR_BGR2GRAY)
    grayB = cv2.cvtColor(imgB, cv2.COLOR_BGR2GRAY)

    # get the image dimensions
    height, width = grayA.shape

    # slide a window over A and find the best-matching region in B,
    # accumulating the per-window differences
    result_window = np.zeros((height, width), dtype=imgA.dtype)
    for start_y in range(0, height-100, 50):
        for start_x in range(0, width-100, 50):
            window = grayA[start_y:start_y+100, start_x:start_x+100]
            match = cv2.matchTemplate(grayB, window, cv2.TM_CCOEFF_NORMED)
            _, _, _, max_loc = cv2.minMaxLoc(match)
            matched_window = grayB[max_loc[1]:max_loc[1]+100,
                                   max_loc[0]:max_loc[0]+100]
            result = cv2.absdiff(window, matched_window)
            result_window[start_y:start_y+100, start_x:start_x+100] = result

    # threshold the difference map and draw boxes around the changed regions
    _, result_window_bin = cv2.threshold(result_window, 127, 255,
                                         cv2.THRESH_BINARY)
    _, contours, _ = cv2.findContours(result_window_bin, cv2.RETR_EXTERNAL,
                                      cv2.CHAIN_APPROX_SIMPLE)
    imgC = imgA.copy()
    for contour in contours:
        min = np.nanmin(contour, 0)
        max = np.nanmax(contour, 0)
        loc1 = (min[0][0], min[0][1])
        loc2 = (max[0][0], max[0][1])
        cv2.rectangle(imgC, loc1, loc2, 255, 2)

    # show the two inputs and the annotated result
    plt.subplot(1, 3, 1), plt.imshow(cv2.cvtColor(imgA, cv2.COLOR_BGR2RGB)), plt.title('A'), plt.xticks([]), plt.yticks([])
    plt.subplot(1, 3, 2), plt.imshow(cv2.cvtColor(imgB, cv2.COLOR_BGR2RGB)), plt.title('B'), plt.xticks([]), plt.yticks([])
    plt.subplot(1, 3, 3), plt.imshow(cv2.cvtColor(imgC, cv2.COLOR_BGR2RGB)), plt.title('Answer'), plt.xticks([]), plt.yticks([])
    plt.show()
def match_template_opencv(template, image, options):
    """
    Match template using the OpenCV template matching implementation.
    Limited to a maximum of 3 channels, so suitable for direct RGB or
    gray-scale matching.

    :param options: Other options:
        - distance: Distance measure to use. (euclidean | correlation | ccoeff).
            Default: 'correlation'
        - normalize: Heatmap values will be in the range of 0 to 1. Default: True
        - retain_size: Whether to retain the same size as input image. Default: True

    :return: Heatmap
    """
    # if image has more than 3 channels, use own implementation
    if len(image.shape) > 3:
        return match_template(template, image, options)

    op = _DEF_TM_OPT.copy()
    if options is not None:
        op.update(options)

    method = cv.TM_CCORR_NORMED
    if op['normalize'] and op['distance'] == 'euclidean':
        method = cv.TM_SQDIFF_NORMED
    elif op['distance'] == 'euclidean':
        method = cv.TM_SQDIFF
    elif op['normalize'] and op['distance'] == 'ccoeff':
        method = cv.TM_CCOEFF_NORMED
    elif op['distance'] == 'ccoeff':
        method = cv.TM_CCOEFF
    elif not op['normalize'] and op['distance'] == 'correlation':
        method = cv.TM_CCORR

    heatmap = cv.matchTemplate(image, template, method)

    # make minimum peak heatmap
    if method not in [cv.TM_SQDIFF, cv.TM_SQDIFF_NORMED]:
        heatmap = heatmap.max() - heatmap

    if op['normalize']:
        heatmap /= heatmap.max()

    # size
    if op['retain_size']:
        hmap = np.ones(image.shape[:2]) * heatmap.max()
        h, w = heatmap.shape
        hmap[:h, :w] = heatmap
        heatmap = hmap

    return heatmap