The following code examples, extracted from open-source Python projects, illustrate how to use cv2.matchTemplate().
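Before the project examples, here is a minimal, self-contained sketch of the typical matchTemplate workflow. The file names 'scene.png' and 'template.png' are placeholders, not files from any of the projects below.

import cv2

# Load the scene and the template in grayscale (placeholder file names).
scene = cv2.imread('scene.png', cv2.IMREAD_GRAYSCALE)
template = cv2.imread('template.png', cv2.IMREAD_GRAYSCALE)

# Slide the template over the scene; res holds one correlation score per position.
res = cv2.matchTemplate(scene, template, cv2.TM_CCOEFF_NORMED)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)

# For TM_CCOEFF_NORMED the best match is at the maximum.
h, w = template.shape
top_left = max_loc
bottom_right = (top_left[0] + w, top_left[1] + h)
cv2.rectangle(scene, top_left, bottom_right, 255, 2)

Note that for the squared-difference methods (TM_SQDIFF, TM_SQDIFF_NORMED) the best match is at the minimum instead, a distinction several of the examples below handle explicitly.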
def get_match_confidence(img1, img2, mask=None):
    if img1.shape != img2.shape:
        return False
    ## first try, using absdiff
    # diff = cv2.absdiff(img1, img2)
    # h, w, d = diff.shape
    # total = h*w*d
    # num = (diff<20).sum()
    # print('is_match', total, num)
    # return num > total*0.90
    if mask is not None:
        img1 = img1.copy()
        img1[mask!=0] = 0
        img2 = img2.copy()
        img2[mask!=0] = 0
    ## using match
    match = cv2.matchTemplate(img1, img2, cv2.TM_CCOEFF_NORMED)
    _, confidence, _, _ = cv2.minMaxLoc(match)
    # print(confidence)
    return confidence
def locate_img(image, template):
    img = image.copy()
    res = cv2.matchTemplate(img, template, method)
    print(res)
    print(res.shape)
    cv2.imwrite('image/shape.png', res)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    print(cv2.minMaxLoc(res))
    if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
        top_left = min_loc
    else:
        top_left = max_loc
    h, w = template.shape
    bottom_right = (top_left[0] + w, top_left[1] + h)
    cv2.rectangle(img, top_left, bottom_right, 255, 2)
    cv2.imwrite('image/tt.jpg', img)
def multiple_template_match(self, feature, scene, roi=None, scale=None,
                            min_scale=0.5, max_scale=1.0, max_distance=14,
                            min_corr=0.8, debug=False,
                            threshold_min=50, threshold_max=200):
    if roi is not None:
        scene = scene[roi.top:(roi.top + roi.height),
                      roi.left:(roi.left + roi.width)]
    if not scale:
        scale = self.find_best_scale(feature, scene, min_scale=min_scale,
                                     max_scale=max_scale, min_corr=min_corr)
    peaks = []
    if scale:
        scaled_feature = cv2.resize(feature, (0, 0), fx=scale, fy=scale)
        canny_scene = cv2.Canny(scene, threshold_min, threshold_max)
        canny_feature = cv2.Canny(scaled_feature, threshold_min, threshold_max)

        # Threshold for peaks.
        corr_map = cv2.matchTemplate(canny_scene, canny_feature, cv2.TM_CCOEFF_NORMED)
        _, max_corr, _, max_loc = cv2.minMaxLoc(corr_map)

        good_points = list(zip(*np.where(corr_map >= max_corr - self.tolerance)))
        if debug:
            print(max_corr, good_points)
        clusters = self.get_clusters(good_points, max_distance=max_distance)
        peaks = [max([(pt, corr_map[pt]) for pt in cluster], key=lambda pt: pt[1])
                 for cluster in clusters]
    return (scale, peaks)
def detectmarker(image):
    grayscale = getgrayimage(image)
    mkradius = getapproxmarkerradius(grayscale)  # approximate marker radius
    marker = cv2.resize(MARKER, (mkradius*2, mkradius*2))  # resize the marker

    # template matching
    matched = cv2.matchTemplate(grayscale, marker, cv2.TM_CCORR_NORMED)  # returns float32

    # detect the 4 greatest values
    markerposarray = []
    for i in range(4):
        (minval, maxval, minloc, maxloc) = cv2.minMaxLoc(matched)
        markerposarray.append(tuple(map(lambda x: x + mkradius, maxloc)))
        cv2.circle(matched, maxloc, mkradius, (0.0), -1)  # suppress the area around the current maxloc
    return markerposarray
def find_best_scale(self, feature, scene, min_scale=0.5, max_scale=1.0,
                    scale_delta=0.03, min_corr=0.8):
    best_corr = 0
    best_scale = 0
    for scale in np.arange(min_scale, max_scale + scale_delta, scale_delta):
        scaled_feature = cv2.resize(feature, (0, 0), fx=scale, fy=scale)
        result = cv2.matchTemplate(scene, scaled_feature, cv2.TM_CCOEFF_NORMED)
        _, max_val, _, _ = cv2.minMaxLoc(result)
        if max_val > best_corr:
            best_corr = max_val
            best_scale = scale
    if best_corr > min_corr:
        return best_scale
    else:
        return None
def find_best_scale(feature, scene, min_scale=0.5, max_scale=1.0,
                    scale_delta=0.02, min_corr=0.8):
    best_corr = 0
    best_scale = 0
    for scale in np.arange(min_scale, max_scale + scale_delta, scale_delta):
        scaled_feature = cv2.resize(feature, (0, 0), fx=scale, fy=scale)
        result = cv2.matchTemplate(scene, scaled_feature, cv2.TM_CCOEFF_NORMED)
        _, max_val, _, max_loc = cv2.minMaxLoc(result)
        if max_val > best_corr:
            best_corr = max_val
            best_scale = scale
    if best_corr > min_corr:
        return best_scale
    else:
        return None
def find_float(img_name):
    print('Looking for float')
    # todo: maybe make some universal float without background?
    for x in range(0, 7):
        template = cv2.imread('var/fishing_float_' + str(x) + '.png', 0)
        img_rgb = cv2.imread(img_name)
        img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
        # print('got images')
        w, h = template.shape[::-1]
        res = cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED)
        threshold = 0.6
        loc = np.where(res >= threshold)
        for pt in zip(*loc[::-1]):
            cv2.rectangle(img_rgb, pt, (pt[0] + w, pt[1] + h), (0, 0, 255), 2)
        if loc[0].any():
            print('Found ' + str(x) + ' float')
            if dev:
                cv2.imwrite('var/fishing_session_' + str(int(time.time())) + '_success.png', img_rgb)
            return (loc[1][0] + w / 2) / 2, (loc[0][0] + h / 2) / 2
def play_game(get_command_callback: Callable[[int, int, int], str]) -> int:
    with mss() as screenshotter:
        get_game_landscape_and_set_focus_or_die(screenshotter)
        reset_game()
        landscape = get_game_landscape_and_set_focus_or_die(screenshotter, .95)
        start_game()
        gameover_template = cv2.imread(os.path.join('templates', 'dino_gameover.png'), 0)
        start = time.time()
        last_distance = landscape['width']
        x1, x2, y1, y2 = compute_region_of_interest(landscape)
        speed = 0
        last_compute_speed = time.time()
        last_speeds = [3] * 30
        last_command_time = time.time()

        while True:
            buffer = screenshotter.grab(landscape)
            image = Image.frombytes('RGB', buffer.size, buffer.rgb).convert('L')
            image = np.array(image)
            image += np.abs(247 - image[0, x2])
            roi = image[y1:y2, x1:x2]
            score = int(time.time() - start)
            distance, size = compute_distance_and_size(roi, x2)
            speed = compute_speed(distance, last_distance, speed, last_speeds, last_compute_speed)
            last_compute_speed = time.time()
            # Check GAME OVER
            if distance == last_distance or distance == 0:
                res = cv2.matchTemplate(image, gameover_template, cv2.TM_CCOEFF_NORMED)
                if np.where(res >= 0.7)[0].size:
                    reset_game()
                    return score
            last_distance = distance
            if time.time() - last_command_time < 0.6:
                continue
            command = get_command_callback(distance, size, speed)
            if command:
                last_command_time = time.time()
                pyautogui.press(command)
def find_game_position(screenshotter, threshold) -> Dict:
    dino_template = cv2.imread(os.path.join('templates', 'dino.png'), 0)
    w, h = dino_template.shape[::-1]
    landscape_template = cv2.imread(os.path.join('templates', 'dino_landscape.png'), 0)
    lw, lh = landscape_template.shape[::-1]
    monitor = screenshotter.monitors[0]
    buffer = screenshotter.grab(monitor)
    image = Image.frombytes('RGB', buffer.size, buffer.rgb).convert('L')
    image = np.array(image)
    res = cv2.matchTemplate(image, dino_template, cv2.TM_CCOEFF_NORMED)
    loc = np.where(res >= threshold)
    if len(loc[0]):
        pt = next(zip(*loc[::-1]))
        return dict(monitor, height=lh, left=pt[0], top=pt[1] - lh + h, width=lw)
    return {}
def test_crop_random():
    # Given one sample image and the following parameters
    image = helpers.get_one_sample_image()
    parameters = {"dst_size": (20, 20),
                  "n_patches": 5}

    # When perform crop_random()
    patches = utils.crop_random(image, parameters["dst_size"], parameters["n_patches"])

    # Then every patch should be included in an image.
    match_cost = []
    for patch in patches:
        M = cv2.matchTemplate(image, patch, cv2.TM_SQDIFF)
        min_cost, _, _, _ = cv2.minMaxLoc(M)
        match_cost.append(min_cost)
    assert (np.array(match_cost) == 0).all(), "utils.crop_random() unit test failed!!"
def reconocedor(img):
    fil, col = img.shape[:2]
    # cv2.imshow('Origin', img)
    contador = 0
    respuesta = 0
    for filename in glob.glob('seniales/*.jpg'):
        im = cv2.imread(filename)
        im = cv2.resize(im, (col, fil))
        res = cv2.matchTemplate(img, im, cv2.TM_CCORR)
        threshold = 0.9
        while (res[0])[0] > 10:
            (res[0])[0] = (res[0])[0] / 10
        loc = (res[0])[0] / 10 >= threshold
        contador = contador + 1
        if loc:
            respuesta = contador
            # cv2.imshow(filename, im)
            # cv2.waitKey()  # keep the image on screen until a key is pressed
            # cv2.destroyAllWindows()  # close all open windows
    return respuesta
def match_template(screenshot, template):
    # Perform match template calculation
    matches = cv2.matchTemplate(screenshot, template, cv2.TM_CCOEFF_NORMED)

    # Survey results
    (min_val, max_val, min_loc, max_loc) = cv2.minMaxLoc(matches)

    # Load template size
    (template_height, template_width) = template.shape[:2]

    return {
        "x1": max_loc[0],
        "y1": max_loc[1],
        "x2": max_loc[0] + template_width,
        "y2": max_loc[1] + template_height,
        "center": {
            "x": max_loc[0] + (template_width / 2),
            "y": max_loc[1] + (template_height / 2)
        },
        "score": max_val
    }
def findTarget(self):
    result = cv2.matchTemplate(self.current_frame, self.root_patch.patch, self.match_method)
    _, _, _, max_loc = cv2.minMaxLoc(result)

    # Select found target
    target_top_left = max_loc
    target_bottom_right = (target_top_left[0] + self.patch_w,
                           target_top_left[1] + self.patch_h)

    # Update Patch with current info
    patch = self.root_patch.copy()
    patch.patch = self.current_frame[
        target_top_left[1]:target_bottom_right[1] + 1,
        target_top_left[0]:target_bottom_right[0] + 1, :]
    patch.p1 = Point(x=target_top_left, y=target_bottom_right)
    self.assignRootPatch(patch)

    self.tracker = KCFTracker(True, True, True)
    self.tracker.init(
        [target_top_left[0], target_top_left[1], self.patch_w, self.patch_h],
        self.current_frame)

    return (target_top_left, target_bottom_right)
def __get_uniq_faces_curr_frame_template_match(self, frame_id, frame_prev, faces_roi):
    logger.info("[{0}] Face Similarity: # of faces in current frame - {1}".format(frame_id, len(faces_roi)))

    # First Time
    if frame_prev.size == 0:
        return len(faces_roi)

    uniq_faces_curr_frame = 0
    for template_roi in faces_roi:
        # Apply template Matching
        res = cv2.matchTemplate(frame_prev, template_roi, cv2.TM_CCOEFF_NORMED)
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
        logger.info("[{0}] {1},{2},{3},{4}".format(frame_id, min_val, max_val, min_loc, max_loc))

    logger.info("[{0}] Total Unique Faces in Current Frame: {1}".format(frame_id, uniq_faces_curr_frame))
    return uniq_faces_curr_frame
def matchTemplate(img_full, img_template, meth):
    w, h = img_template.shape[::-1]
    img = img_full.copy()

    # Apply template Matching
    method = eval(meth)
    res = cv2.matchTemplate(img, img_template, method)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)

    # If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take the minimum
    if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
        top_left = min_loc
    else:
        top_left = max_loc
    bottom_right = (top_left[0] + w, top_left[1] + h)
    return [top_left, bottom_right]
def __init__(self):
    t = ImageGrab.grab().convert("RGB")
    self.screen = cv2.cvtColor(numpy.array(t), cv2.COLOR_RGB2BGR)
    self.ultLoader = ImageLoader('image/ult/')

    if self.have('topleft'):
        tl = self._imageLoader.get('topleft')
        res = cv2.matchTemplate(self.screen, tl, cv2.TM_CCOEFF_NORMED)
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
        x1, y1 = max_loc

        rd = self._imageLoader.get('rightdown')
        res = cv2.matchTemplate(self.screen, rd, cv2.TM_CCOEFF_NORMED)
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
        x2, y2 = max_loc

        # default 989
        GameStatus().y = y2 - y1
        GameStatus().use_Droid4X = True
def find_list(self, name):
    cards = []
    res = cv2.matchTemplate(self.screen, self._imageLoader.get(name), cv2.TM_CCOEFF_NORMED)
    threshold = 0.8
    loc = numpy.where(res >= threshold)
    x = 0
    t = sorted(zip(*loc[::-1]))
    for pt in t:
        if abs(x - pt[0]) > 100 or x == 0:
            x = pt[0]
            cards.append((pt[0], pt[1]))
        else:
            continue
    self.log(name + ': ' + str(len(cards)))
    return cards
def detect(self, image):
    # convert image to grayscale
    image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # apply template matching
    result = cv2.matchTemplate(image_gray, self.template, cv2.TM_CCOEFF_NORMED)

    # obtain locations where the threshold is met
    locations = np.where(result >= self.THRESHOLD)
    for item in locations:
        if len(item) == 0:
            return None
    return locations
def check_list(self):
    items_dict = imd.ImageStorage()
    items_dict = items_dict.pickled_dict
    RS.press_button('equipment')
    time.sleep(1)
    for key in items_dict.keys():
        template = items_dict[key]
        # save for DEBUG
        # cv2.imwrite('debug_template_file', template)
        w, h = template.shape[::-1]
        pattern = RS.get_bag('only', 'gray')
        res = cv2.matchTemplate(pattern, template, cv2.TM_CCOEFF_NORMED)
        threshold = .8  # default is .8
        loc = np.where(res >= threshold)
        for pt in zip(*loc[::-1]):  # goes through each found image
            print('{} found'.format(key))
            break
        else:
            print('{} not found'.format(key))
def this(img_pat, img_temp):
    """Pass img_pat as a cv2 image, img_temp as a file path.
    The passed function does whatever is needed after finding img_temp."""
    cwd = os.getcwd()
    if cwd not in img_temp:
        img_temp = cwd + img_temp
    if '.png' not in img_temp:
        img_temp = img_temp + '.png'
    # print for DEBUG
    # print(img_temp)
    img_temp = cv2.imread(img_temp, 0)
    # save for DEBUG
    # cv2.imwrite('img_temp', img_temp)
    w, h = img_temp.shape[::-1]
    res = cv2.matchTemplate(img_pat, img_temp, cv2.TM_CCOEFF_NORMED)
    threshold = .8  # default is .8
    loc = np.where(res >= threshold)
    return loc, w, h
def images(img_pat, img_temp, x, y, func):
    w, h = img_temp.shape[::-1]
    try:
        res = cv2.matchTemplate(img_temp, img_pat, cv2.TM_CCOEFF_NORMED)
    except Exception as e:
        print("cannot match")
        print(e)
    threshold = .9  # default is .8
    loc = np.where(res >= threshold)
    for pt in zip(*loc[::-1]):  # goes through each found image
        func(img_pat, x, y, pt, w, h)
        return 0
    return 1
    # return loc to be iterable outside the function
    # also sometimes width and height of image is needed
def exists(image, template, thresh):
    """
    Returns True if template is in image with probability of at least thresh
    :param image:
    :param template:
    :param thresh:
    :return:
    """
    digit_res = cv2.matchTemplate(image, template, cv2.TM_CCOEFF_NORMED)
    loc = np.where(digit_res >= thresh)

    if len(loc[-1]) == 0:
        return False

    for pt in zip(*loc[::-1]):
        if digit_res[pt[1]][pt[0]] == 1:
            return False
    return True
def find_template(template):
    method = 'cv2.TM_CCOEFF'
    w, h = template.shape[::-1]
    res = cv2.matchTemplate(image, template, eval(method))
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    top_left = max_loc
    bottom_right = (top_left[0] + w, top_left[1] + h)
    return top_left, bottom_right, res
def identify_summons(image_path):
    import cv2
    import numpy as np

    image = cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2GRAY)
    summons = []
    points = 0
    for file_name, (point_value, actual_name) in possible_summons.items():
        template = cv2.imread(os.path.join('screenshots', 'summons', file_name + '.png'),
                              cv2.IMREAD_GRAYSCALE)
        res = cv2.matchTemplate(image, template, cv2.TM_CCOEFF_NORMED)
        loc = np.where(res >= CLOSENESS_THRESHOLD)
        for pt in zip(*loc[::-1]):
            # Due to weird behaviour, only add one instance of each summon
            if actual_name in summons:
                continue
            summons.append(actual_name)
            points += point_value
    return (summons, points)
def image_is_on_screen(template_name):
    template = cv2.imread(os.path.join('screenshots', template_name + '.png'),
                          cv2.IMREAD_GRAYSCALE)
    image = cv2.cvtColor(
        np.array(pyautogui.screenshot(region=(0, 0, 1300, 750))),
        cv2.COLOR_BGR2GRAY)
    res = cv2.matchTemplate(image, template, cv2.TM_CCOEFF_NORMED)
    loc = np.where(res >= CLOSENESS_THRESHOLD)
    # Not sure why this works but okay
    for pt in zip(*loc[::-1]):
        return True
    return False
def detectTemplateMatching(self, img):
    self.templateMatchingCurrentTime = cv2.getTickCount()
    duration = (self.templateMatchingCurrentTime - self.templateMatchingStartTime) / cv2.getTickFrequency()
    if duration > settings.templateMatchingDuration or self.trackedFaceTemplate[2] == 0 or self.trackedFaceTemplate[3] == 0:
        self.foundFace = False
        self.isTemplateMatchingRunning = False
        return

    faceTemplate = self.getSubRect(img, self.trackedFaceTemplate)
    roi = self.getSubRect(img, self.trackedFaceROI)
    match = cv2.matchTemplate(roi, faceTemplate, cv2.TM_SQDIFF_NORMED)
    cv2.normalize(match, match, 0, 1, cv2.NORM_MINMAX, -1)
    minVal, maxVal, minLoc, maxLoc = cv2.minMaxLoc(match)
    foundTemplate = (
        minLoc[0] + self.trackedFaceROI[0],
        minLoc[1] + self.trackedFaceROI[1],
        self.trackedFaceTemplate[2],
        self.trackedFaceTemplate[3])

    self.trackedFaceTemplate = foundTemplate
    self.trackedFace = self.scaleRect(self.trackedFaceTemplate, img, 2)
    self.trackedFaceROI = self.scaleRect(self.trackedFace, img, 2)
def detect(self, template):
    template = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY)
    if self.is_cards_screen:
        template = self.scale_template_for_cards_screen(template)
    result = cv2.matchTemplate(self.original, template, cv2.TM_CCOEFF_NORMED)
    loc = np.where(result >= self.threshold)
    points = list(zip(*loc[::-1]))
    if len(points) > 0:
        return HeroDetector.combine_points(points)
    return None

# Scale template down if we're on the game-over screen since the hero
# portraits are smaller there than during the game.
def test_templating(self):
    bbox = self.wh.create_boundingbox()
    scaled_bbox = self.wh.bbox_scale(bbox, 0.5)
    sub_image = self.px.grab_window(scaled_bbox)
    sub_image = self.px.img_to_numpy(sub_image)
    w, h = sub_image.shape[0:2]
    main_image = cv2.imread('pytomatic/tests/assets/calc_clean.PNG')
    res = cv2.matchTemplate(main_image, sub_image, cv2.TM_CCOEFF)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    top_left = max_loc
    bottom_right = (top_left[0] + w, top_left[1] + h)
    cv2.rectangle(main_image, top_left, bottom_right, 255, 2)
    assert top_left == (89, 89)
    assert bottom_right == (290, 290)
    # plt.imshow(main_image)
    # plt.show()
def getMatchingScore(img, digit):
    score = cv2.matchTemplate(img, cv2.imread('Templates/T' + str(digit) + '.jpg', 0),
                              cv2.TM_SQDIFF) / 2000
    return score

# Gets the best prediction of the digit in a cell using template matching
def removeInnerGridLines(img):
    template = cv2.imread('cross_template.jpg', 0)
    (tx, ty) = np.shape(template)
    res = cv2.matchTemplate(img, template, cv2.TM_SQDIFF_NORMED)
    threshold = 0.1
    loc = np.where(res <= threshold)
    for pt in zip(*loc[::-1]):
        x = pt[0]
        y = pt[1]
        img, area, dummy = customFloodFill(img, (x + int(tx/2), y + int(ty/2)), 0, 0)
    return img

# Reads in an image of a sudoku and does the processing
def recognizeDigit(digit, method=REC_METHOD_TEMPLATE_MATCHING, threshold=55):
    """
    Finds the best match for the given digit (RGB or gray color scheme)
    and returns the result and the match percentage as an integer.
    @threshold percentage of similarity
    """
    __readDigitTemplates()
    digit = digit.copy()
    if digit.shape[2] == 3:
        digit = cv2.cvtColor(digit, cv2.COLOR_RGB2GRAY)
    ret, digit = cv2.threshold(digit, 90, 255, cv2.THRESH_BINARY_INV)
    bestDigit = -1
    if method == REC_METHOD_TEMPLATE_MATCHING:
        bestMatch = None
        for i in range(len(__DIGIT_TEMPLATES)):
            template = __DIGIT_TEMPLATES[i].copy()
            if digit.shape[1] < template.shape[1]:
                template = cv2.resize(template, (digit.shape[1], digit.shape[0]))
            else:
                digit = cv2.resize(digit, (template.shape[1], template.shape[0]))
            result = cv2.matchTemplate(digit, template, cv2.TM_CCORR_NORMED)  # cv2.TM_CCOEFF_NORMED
            (_, max_val, _, max_loc) = cv2.minMaxLoc(result)
            if bestMatch is None or max_val > bestMatch:
                bestMatch = max_val
                bestDigit = i
                print("New Best Match:", bestMatch, bestDigit)

    if (bestMatch * 100) >= threshold:
        return (bestDigit, bestMatch * 100)
    return (-1, 0)
def find_matches(img, template_list):
    # Define an empty list to take bbox coords
    bbox_list = []
    # Iterate through the template list, read in templates one by one,
    # use cv2.matchTemplate() to search the image (with whichever of the
    # OpenCV search methods you prefer), use cv2.minMaxLoc() to extract
    # the location of the best match, and determine the bounding box
    # corners for that match.
    method = cv2.TM_CCOEFF_NORMED
    for temp in template_list:
        tmp = mpimg.imread(temp)
        # Apply template Matching
        res = cv2.matchTemplate(img, tmp, method)
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
        w, h = (tmp.shape[1], tmp.shape[0])
        # If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take the minimum
        if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
            top_left = min_loc
        else:
            top_left = max_loc
        bottom_right = (top_left[0] + w, top_left[1] + h)
        bbox_list.append((top_left, bottom_right))
    # Return the list of bounding boxes
    return bbox_list
def ocr():
    img = numpy.array(ImageGrab.grab().convert('RGB'))[:, :, ::-1].copy()[y:y+h, x:x+w][:, :, 2]
    # img = cv2.equalizeHist(img)
    index = 0
    for tmp in templates:
        res = cv2.matchTemplate(img, tmp, cv2.TM_CCORR_NORMED)
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
        ix, iy = max_loc[0] // pw, max_loc[1] // ph
        strx = txtbox[iy][ix].get()
        index = index + 1
        txtbox[iy][ix].insert(len(strx), str(index))
    return
def getcroppedarea(img, markersize):
    # use template matching to detect the area to be cropped
    grayimg = getgrayimage(img)

    # detect top-left marker using template matching
    marker_tl = cv2.resize(MARKER_TL, (markersize, markersize))
    matched = cv2.matchTemplate(grayimg, marker_tl, cv2.TM_CCORR_NORMED)  # returns float32
    (minval, maxval, minloc, maxloc) = cv2.minMaxLoc(matched)
    mkrect = getmarkerboundingrect(grayimg, maxloc, markersize)
    pos_tl = (mkrect.x + mkrect.w, mkrect.y + mkrect.h)
    # pos_tl = (maxloc[0]+markersize, maxloc[1]+markersize)

    # detect bottom-right marker using template matching
    marker_br = cv2.resize(MARKER_BR, (markersize, markersize))
    matched = cv2.matchTemplate(grayimg, marker_br, cv2.TM_CCORR_NORMED)  # returns float32
    (minval, maxval, minloc, maxloc) = cv2.minMaxLoc(matched)
    mkrect = getmarkerboundingrect(grayimg, maxloc, markersize)
    pos_br = (mkrect.x, mkrect.y)
    # pos_br = maxloc

    # detect QR code
    qrarea = img[pos_br[1]:, :img.shape[0]-pos_br[1]]
    typ, val = passzbar.passzbar(qrarea)
    if not typ:
        return None, None
    strval = val.decode('ascii').strip()
    # print(strval)
    # cv2.circle(img, pos_tl, 5, (255, 0, 0), -1)
    # cv2.circle(img, pos_br, 5, (0, 255, 0), -1)
    # print(pos_tl, pos_br)
    # cv2.imshow("hoge", img)
    # cv2.imshow("hoge", img[pos_tl[1]:pos_br[1], pos_tl[0]:pos_br[0]])

    # crop and return the detected area
    return strval, img[pos_tl[1]:pos_br[1], pos_tl[0]:pos_br[0]]
def test_find_scene():
    scenes = {}
    for s in os.listdir('txxscene'):
        if '-' in s:
            continue
        i = cv2.imread(os.path.join('txxscene', s), cv2.IMREAD_GRAYSCALE)
        scenes[s] = i

    # names = [os.path.join('scene', c) for c in os.listdir('scene')]
    imgs = {}
    for n in os.listdir('scene'):
        i = cv2.imread(os.path.join('scene', n), cv2.IMREAD_GRAYSCALE)
        i = cv2.resize(i, (960, 540))
        imgs[n] = i

    for name, img in imgs.items():
        for scene, tmpl in scenes.items():
            res = cv2.matchTemplate(img, tmpl, cv2.TM_CCOEFF_NORMED)
            min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
            if max_val < 0.6:
                continue
            x, y = max_loc
            h, w = tmpl.shape
            cv2.rectangle(img, (x, y), (x+w, y+h), 255, 2)
            print(name, scene, max_val, min_val)
            cv2.imshow('found', img)
            cv2.waitKey()
def find_match(img, tmpl, rect=None, mask=None):
    if rect is not None:
        h, w = img.shape[:2]
        x, y, x1, y1 = rect
        if x1 > w or y1 > h:
            return 0, None
        img = img[y:y1, x:x1, :]

    if mask is not None:
        img = img.copy()
        img[mask!=0] = 0
        tmpl = tmpl.copy()
        tmpl[mask!=0] = 0

    s_bgr = cv2.split(tmpl)  # Blue Green Red
    i_bgr = cv2.split(img)

    weight = (0.3, 0.3, 0.4)
    resbgr = [0, 0, 0]
    for i in range(3):  # bgr
        resbgr[i] = cv2.matchTemplate(i_bgr[i], s_bgr[i], cv2.TM_CCOEFF_NORMED)
    match = resbgr[0]*weight[0] + resbgr[1]*weight[1] + resbgr[2]*weight[2]

    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(match)
    confidence = max_val
    x, y = max_loc
    h, w = tmpl.shape[:2]
    if rect is None:
        rect = (x, y, x+w, y+h)
        # cv2.rectangle(img, (x,y), (x+w,y+h), (0,255,0), 2)
        # cv2.imshow('test', img)
        # cv2.waitKey(20)
    return confidence, rect
def match(self, templateimage, threshold=0.8):
    image = cv2.imread(self.sourceimage)
    template = cv2.imread(templateimage)
    result = cv2.matchTemplate(image, template, cv2.TM_CCOEFF_NORMED)
    similarity = cv2.minMaxLoc(result)[1]
    if similarity < threshold:
        return similarity
    else:
        return np.unravel_index(result.argmax(), result.shape)
def template_match(img_master, img_slave, method='cv2.TM_CCOEFF_NORMED', mlx=1, mly=1, show=True):
    # Apply image oversampling
    img_master = cv2.resize(img_master, None, fx=mlx, fy=mly, interpolation=cv2.INTER_CUBIC)
    img_slave = cv2.resize(img_slave, None, fx=mlx, fy=mly, interpolation=cv2.INTER_CUBIC)

    meth = eval(method)
    res = cv2.matchTemplate(img_slave, img_master, meth)

    w, h = img_master.shape[::-1]
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)

    # If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take the minimum value
    if meth in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
        top_left = min_loc
    else:
        top_left = max_loc
    bottom_right = (top_left[0] + w, top_left[1] + h)

    # Retrieve center coordinates
    px = (top_left[0] + bottom_right[0]) / (2.0 * mlx)
    py = (top_left[1] + bottom_right[1]) / (2.0 * mly)

    # Scale images for visualization
    img_master_scaled = cv2.convertScaleAbs(img_master, alpha=(255.0/500))
    img_slave_scaled = cv2.convertScaleAbs(img_slave, alpha=(255.0/500))
    cv2.rectangle(img_slave_scaled, top_left, bottom_right, 255, 2*mlx)

    if show == True:
        plt.figure(figsize=(20, 10))
        plt.subplot(131), plt.imshow(res, cmap='gray')
        plt.title('Matching Result'), plt.xticks([]), plt.yticks([])
        plt.subplot(132), plt.imshow(img_master_scaled, cmap='gray')
        plt.title('Detected Point'), plt.xticks([]), plt.yticks([])
        plt.subplot(133), plt.imshow(img_slave_scaled, cmap='gray')
        plt.suptitle(method)
        plt.show()

    return px, py, max_val
def fit(img, templates, start_percent, stop_percent, threshold):
    img_width, img_height = img.shape[::-1]
    best_location_count = -1
    best_locations = []
    best_scale = 1

    plt.axis([0, 2, 0, 1])
    plt.show(block=False)

    x = []
    y = []
    for scale in [i/100.0 for i in range(start_percent, stop_percent + 1, 3)]:
        locations = []
        location_count = 0
        for template in templates:
            template = cv2.resize(template, None, fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)
            result = cv2.matchTemplate(img, template, cv2.TM_CCOEFF_NORMED)
            result = np.where(result >= threshold)
            location_count += len(result[0])
            locations += [result]
        print("scale: {0}, hits: {1}".format(scale, location_count))
        x.append(location_count)
        y.append(scale)
        plt.plot(y, x)
        plt.pause(0.00001)
        if location_count > best_location_count:
            best_location_count = location_count
            best_locations = locations
            best_scale = scale
            plt.axis([0, 2, 0, best_location_count])
        elif location_count < best_location_count:
            pass

    plt.close()
    return best_locations, best_scale
def match_template_mask(image, template, mask=None, method=None, sigma=0.33):
    """Match template against image, applying mask to template using method.
    Method can be one of (None, 'laplacian', 'sobel', 'scharr', 'prewitt',
    'roberts', 'canny'). Returns the map of locations in which to look for
    maximum values."""
    if mask is not None:
        if method:
            kernel = np.ones((3, 3), np.uint8)
            mask = cv2.erode(mask, kernel)
            if method == 'laplacian':
                # use CV_64F so as not to lose edges, convert to uint8 afterwards
                edge_image = np.uint8(np.absolute(cv2.Laplacian(image, cv2.CV_64F)))
                edge_template = np.uint8(np.absolute(cv2.Laplacian(template, cv2.CV_64F)))
            elif method in ('sobel', 'scharr', 'prewitt', 'roberts'):
                filter_func = getattr(skfilters, method)
                edge_image = filter_func(image)
                edge_template = filter_func(template)
                edge_image = convert(edge_image)
                edge_template = convert(edge_template)
            else:  # method == 'canny'
                values = np.hstack([image.ravel(), template.ravel()])
                median = np.median(values)
                lower = int(max(0, (1.0 - sigma) * median))
                upper = int(min(255, (1.0 + sigma) * median))
                edge_image = cv2.Canny(image, lower, upper)
                edge_template = cv2.Canny(template, lower, upper)
            results = cv2.matchTemplate(edge_image, edge_template & mask,
                                        cv2.TM_CCOEFF_NORMED)
        else:
            results = cv2.matchTemplate(image, template, cv2.TM_CCOEFF_NORMED, mask)
    else:
        results = cv2.matchTemplate(image, template, cv2.TM_CCOEFF_NORMED)
    return results
def MatchTemplate(template, target):
    """Returns the match score for the given template"""
    res = cv2.matchTemplate(target, template, cv2.TM_CCOEFF_NORMED)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    return max_val
def matchTemplate(self, img, template):
    res = cv2.matchTemplate(cv2.cvtColor(img, cv2.COLOR_RGB2GRAY), template, cv2.TM_CCOEFF_NORMED)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
    top_left = max_loc
    bottom_right = (top_left[0] + template.shape[1], top_left[1] + template.shape[0])
    return top_left, bottom_right
def getScoreboard(self, img):
    template_width = self.TEMPLATE_SCOREBOARD.shape[1]
    img_width = img.shape[1]
    template = imutils.resize(self.TEMPLATE_SCOREBOARD,
                              width=int(template_width/1280.0*img_width))
    top_left, bottom_right = self.matchTemplate(img, template)
    return img[top_left[1]:bottom_right[1], top_left[0]:bottom_right[0]]
def getTopBar(self, img):
    template_width = self.TEMPLATE_TOP.shape[1]
    img_width = img.shape[1]
    template = imutils.resize(self.TEMPLATE_TOP,
                              width=int(template_width/1280.0*img_width))
    top_left, bottom_right = self.matchTemplate(img, template)
    located = img[top_left[1]:bottom_right[1], top_left[0]:bottom_right[0]]
    h, w = located.shape[:2]
    return located[:, int(w*0.125):int(w*0.5)]
def getTimeArea(self, img):
    template_width = self.TEMPLATE_TIME.shape[1]
    img_width = img.shape[1]
    template = imutils.resize(self.TEMPLATE_TIME,
                              width=int(template_width/1280.0*img_width))
    top_left, bottom_right = self.matchTemplate(img, template)
    located = img[top_left[1]:bottom_right[1], top_left[0]:bottom_right[0]]
    h, w = located.shape[:2]
    return located[int(h*0.16):int(h*0.84), int(w*0.42):int(w*0.58)]