The following code examples, extracted from open-source Python projects, illustrate how to use cv2.THRESH_BINARY_INV.
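Before the project snippets, here is a minimal self-contained sketch of what the flag itself does (the synthetic gradient image and the threshold of 127 are illustrative choices, not taken from any project below). With cv2.THRESH_BINARY_INV, a pixel above the threshold maps to 0 and every other pixel maps to the given maximum value, the exact complement of cv2.THRESH_BINARY:

import cv2
import numpy as np

# a synthetic 8-bit gradient, just for illustration
img = np.tile(np.arange(256, dtype=np.uint8), (32, 1))

# THRESH_BINARY: pixel > 127 -> 255, else 0
_, binary = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)

# THRESH_BINARY_INV: pixel > 127 -> 0, else 255
_, inverted = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV)

assert np.all(binary + inverted == 255)  # the two maps are exact complements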
def getmarkerboundingrect(img, mkpos, mksize):
    buffer = int(mksize * 0.15)
    x = mkpos[0] - buffer
    y = mkpos[1] - buffer
    w = mksize + buffer * 2
    h = mksize + buffer * 2
    roi = img[y:y+h, x:x+w]

    grayroi = getgrayimage(roi)
    ret, binimage = cv2.threshold(grayroi, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    nlabels, labels, stats, centroids = cv2.connectedComponentsWithStats(binimage)
    # stats[0], centroids[0] are for the background label. ignore
    # cv2.CC_STAT_LEFT, cv2.CC_STAT_TOP, cv2.CC_STAT_WIDTH, cv2.CC_STAT_HEIGHT
    lblareas = stats[1:, cv2.CC_STAT_AREA]
    imax = max(enumerate(lblareas), key=(lambda x: x[1]))[0] + 1
    boundingrect = Rect(stats[imax, cv2.CC_STAT_LEFT],
                        stats[imax, cv2.CC_STAT_TOP],
                        stats[imax, cv2.CC_STAT_WIDTH],
                        stats[imax, cv2.CC_STAT_HEIGHT])
    return boundingrect.addoffset((x, y))
def getmarkercenter(image, pos):
    mkradius = getapproxmarkerradius(image)
    buffer = int(mkradius * 0.15)
    roisize = mkradius + buffer  # half of the height or width
    x = pos[0] - roisize
    y = pos[1] - roisize
    w = 2 * roisize
    h = 2 * roisize
    roi = image[y:y+h, x:x+w]

    grayroi = getgrayimage(roi)
    ret, binimage = cv2.threshold(grayroi, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    nlabels, labels, stats, centroids = cv2.connectedComponentsWithStats(binimage)
    # stats[0], centroids[0] are for the background label. ignore
    lblareas = stats[1:, cv2.CC_STAT_AREA]

    ave = np.average(centroids[1:], axis=0, weights=lblareas)
    return tuple(np.array([x, y]) + ave)  # weighted average position of the centroids
def adaptive_threshold(image, above_thresh_assigned=255, kind='mean', cell_size=35,
                       c_param=17, thresh_style=cv.THRESH_BINARY_INV):
    '''
    :param kind: specify adaptive method, whether 'mean' or 'gaussian'.
    :param cell_size: n for the region size (n x n).
    :param c_param: subtraction constant.
    :return: a binary version of the input image.
    '''
    if kind == 'mean':
        method = cv.ADAPTIVE_THRESH_MEAN_C
    elif kind == 'gaussian':
        method = cv.ADAPTIVE_THRESH_GAUSSIAN_C
    else:
        raise ValueError('Unknown adaptive threshold method.')

    return cv.adaptiveThreshold(image, above_thresh_assigned, method, thresh_style,
                                cell_size, c_param)
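A possible way to call the wrapper above, as a sketch only: it assumes the surrounding module imports OpenCV as cv (import cv2 as cv), and the file name page.png is a placeholder, not part of the original project:

import cv2 as cv

gray = cv.imread('page.png', cv.IMREAD_GRAYSCALE)  # adaptiveThreshold needs a single-channel 8-bit image
# with THRESH_BINARY_INV, dark ink on a light page comes out white on black
binary = adaptive_threshold(gray, kind='gaussian', cell_size=35, c_param=17)
cv.imwrite('page_bin.png', binary)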
def predict(url):
    global model
    # Read image
    image = io.imread(url)
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    image = cv2.resize(image, (500, 500), interpolation=cv2.INTER_CUBIC)

    # Use Otsu to mask
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    ret, mask = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    mask = cv2.medianBlur(mask, 5)

    features = describe(image, mask)
    state = le.inverse_transform(model.predict([features]))[0]
    return {'type': state}
def find_chars(img):
    gray = np.array(img.convert("L"))
    ret, mask = cv2.threshold(gray, 180, 255, cv2.THRESH_BINARY)
    image_final = cv2.bitwise_and(gray, gray, mask=mask)
    ret, new_img = cv2.threshold(image_final, 180, 255, cv2.THRESH_BINARY_INV)
    kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
    dilated = cv2.dilate(new_img, kernel, iterations=1)
    # Image.fromarray(dilated).save('out.png')  # for debugging
    _, contours, hierarchy = cv2.findContours(dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

    coords = []
    for contour in contours:
        # get rectangle bounding contour
        [x, y, w, h] = cv2.boundingRect(contour)
        # ignore large boxes (probably not chars)
        if w > 70 and h > 70:
            continue
        coords.append((x, y, w, h))
    return coords

# find list of eye coordinates in image
def test_initial_pass_through_compare(self):
    original = cv2.imread(os.path.join(self.provider.assets, "start_screen.png"))
    against = self.provider.get_img_from_screen_shot()
    wrong = cv2.imread(os.path.join(self.provider.assets, "battle.png"))

    # convert the images to grayscale
    original = mask_image([127], [255], cv2.cvtColor(original, cv2.COLOR_BGR2GRAY), True)
    against = mask_image([127], [255], cv2.cvtColor(against, cv2.COLOR_BGR2GRAY), True)
    wrong = mask_image([127], [255], cv2.cvtColor(wrong, cv2.COLOR_BGR2GRAY), True)

    # initialize the figure
    (score, diff) = compare_ssim(original, against, full=True)
    diff = (diff * 255).astype("uint8")
    self.assertTrue(score > .90, 'If this is less than .90 the initial compare of the app will fail')

    (score, nothing) = compare_ssim(original, wrong, full=True)
    self.assertTrue(score < .90)

    if self.__debug_pictures__:
        # threshold the difference image, followed by finding contours to
        # obtain the regions of the two input images that differ
        thresh = cv2.threshold(diff, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        cnts = cnts[0]

        # loop over the contours
        for c in cnts:
            # compute the bounding box of the contour and then draw the
            # bounding box on both input images to represent where the two
            # images differ
            (x, y, w, h) = cv2.boundingRect(c)
            cv2.rectangle(original, (x, y), (x + w, y + h), (0, 0, 255), 2)
            cv2.rectangle(against, (x, y), (x + w, y + h), (0, 0, 255), 2)

        # show the output images
        diffs = ("Original", original), ("Modified", against), ("Diff", diff), ("Thresh", thresh)
        images = ("Original", original), ("Against", against), ("Wrong", wrong)
        self.setup_compare_images(diffs)
        self.setup_compare_images(images)
def load(self, filename, analyze_only):
    # Load image, then do various conversions and thresholding.
    self.img_orig = cv2.imread(filename, cv2.IMREAD_COLOR)

    if self.img_orig is None:
        raise CompilerException("File '{}' not found".format(filename))

    self.img_grey = cv2.cvtColor(self.img_orig, cv2.COLOR_BGR2GRAY)
    _, self.img_contour = cv2.threshold(self.img_grey, 250, 255, cv2.THRESH_BINARY_INV)
    _, self.img_text = cv2.threshold(self.img_grey, 150, 255, cv2.THRESH_BINARY)
    self.root_node = None

    self.contours = self.find_contours()
    self.contour_lines, self.contour_nodes = self.categorize_contours()

    self.build_graph()
    self.build_parse_tree()
    self.parse_nodes()

    if not analyze_only:
        self.python_ast = self.root_node.to_python_ast()
def foreground(self, image, smooth=False, grayscale=False):
    """
    Extract foreground from background
    :param image:
    :param smooth:
    :param grayscale:
    :return:
    """
    if smooth and grayscale:
        image = self.toGrayscale(image)
        image = self.smooth(image)
    elif smooth:
        image = self.smooth(image)
    elif grayscale:
        image = self.toGrayscale(image)

    fgmask = self.fgbg.apply(image)
    ret, mask = cv2.threshold(fgmask, 200, 255, cv2.THRESH_BINARY_INV)
    mask_inv = cv2.bitwise_not(mask)
    return mask_inv
def extract_color(src, h_th_low, h_th_up, s_th, v_th):
    hsv = cv2.cvtColor(src, cv2.COLOR_BGR2HSV)
    h, s, v = cv2.split(hsv)
    if h_th_low > h_th_up:
        ret, h_dst_1 = cv2.threshold(h, h_th_low, 255, cv2.THRESH_BINARY)
        ret, h_dst_2 = cv2.threshold(h, h_th_up, 255, cv2.THRESH_BINARY_INV)
        dst = cv2.bitwise_or(h_dst_1, h_dst_2)
    else:
        ret, dst = cv2.threshold(h, h_th_low, 255, cv2.THRESH_TOZERO)
        ret, dst = cv2.threshold(dst, h_th_up, 255, cv2.THRESH_TOZERO_INV)
        ret, dst = cv2.threshold(dst, 0, 255, cv2.THRESH_BINARY)
    ret, s_dst = cv2.threshold(s, s_th, 255, cv2.THRESH_BINARY)
    ret, v_dst = cv2.threshold(v, v_th, 255, cv2.THRESH_BINARY)
    dst = cv2.bitwise_and(dst, s_dst)
    dst = cv2.bitwise_and(dst, v_dst)
    return dst
def camera_gesture_trigger():
    # Capture frame-by-frame
    ret, frame = cap.read()

    # Our operations on the frame come here
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (5, 5), 0)
    ret, thresh1 = cv2.threshold(blur, 70, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)

    contours, hierarchy = cv2.findContours(thresh1, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    max_area = 0
    for i in range(len(contours)):
        cnt = contours[i]
        area = cv2.contourArea(cnt)
        if area > max_area:
            max_area = area
            ci = i
    cnt = contours[ci]
    hull = cv2.convexHull(cnt)
    moments = cv2.moments(cnt)

    cnt = cv2.approxPolyDP(cnt, 0.01 * cv2.arcLength(cnt, True), True)
    hull = cv2.convexHull(cnt, returnPoints=False)

    defects = cv2.convexityDefects(cnt, hull)

    if defects is not None:
        if defects.shape[0] >= 5:
            return 1

    return 0
def thresholding(img_grey):
    """
    This function creates binary images using thresholding
    :param img_grey: greyscale image
    :return: binary image
    """
    # # Adaptive Gaussian
    # img_binary = cv.adaptiveThreshold(img_grey, 255, cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY, 11, 2)

    # Otsu's thresholding after Gaussian filtering
    blur = cv.GaussianBlur(img_grey, (5, 5), 0)
    ret3, img_binary = cv.threshold(blur, 0, 255, cv.THRESH_BINARY + cv.THRESH_OTSU)

    # invert black = 255
    ret, thresh1 = cv.threshold(img_binary, 127, 255, cv.THRESH_BINARY_INV)

    return thresh1
def thresholding(img_grey):
    """
    This function creates binary images using thresholding
    :param img_grey: greyscale image
    :return: binary image
    """
    # # Global
    # ret1, thresh1 = cv.threshold(img_grey, 127, 255, cv.THRESH_BINARY_INV)
    # show_img(thresh1)

    # # Adaptive Mean
    # img_binary = cv.adaptiveThreshold(img_grey, 255, cv.ADAPTIVE_THRESH_MEAN_C, cv.THRESH_BINARY, 11, 2)
    # ret2, thresh2 = cv.threshold(img_binary, 127, 255, cv.THRESH_BINARY_INV)
    # show_img(thresh2)

    # # Adaptive Gaussian
    # img_binary = cv.adaptiveThreshold(img_grey, 255, cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY, 11, 2)
    # ret3, thresh3 = cv.threshold(img_binary, 127, 255, cv.THRESH_BINARY_INV)
    # show_img(thresh3)

    # Otsu's thresholding after Gaussian filtering
    blur = cv.GaussianBlur(img_grey, (5, 5), 0)
    ret4, img_otsu = cv.threshold(blur, 0, 255, cv.THRESH_BINARY + cv.THRESH_OTSU)
    ret4, thresh4 = cv.threshold(img_otsu, 127, 255, cv.THRESH_BINARY_INV)
    # show_img(thresh4)

    return thresh4
def run(self, ips, snap, img, para=None):
    med = cv2.ADAPTIVE_THRESH_MEAN_C if para['med'] == 'mean' else cv2.ADAPTIVE_THRESH_GAUSSIAN_C
    mtype = cv2.THRESH_BINARY_INV if para['inv'] else cv2.THRESH_BINARY
    # pass the computed threshold type (the original passed para['inv'], leaving mtype unused;
    # the two happen to coincide numerically since THRESH_BINARY == 0 and THRESH_BINARY_INV == 1)
    cv2.adaptiveThreshold(snap, para['max'], med, mtype, para['size'], para['offset'], dst=img)
def process_data():
    all_data = []
    img_size = 256
    contour_path = os.path.join(c.data_manual, 'manual_contours_ch4', 'contours')
    image_path = os.path.join(c.data_manual, 'manual_contours_ch4', 'images')
    for fn in [f for f in os.listdir(contour_path) if 'jpg' in f]:
        if not os.path.exists(os.path.join(image_path, fn)):
            continue
        img = cv2.imread(os.path.join(image_path, fn), 0)
        img = cv2.resize(img, (img_size, img_size)).reshape(1, 1, img_size, img_size)
        label = cv2.imread(os.path.join(contour_path, fn), 0)
        label = cv2.resize(label, (img_size, img_size))
        _, label = cv2.threshold(label, 127, 255, cv2.THRESH_BINARY_INV)
        label = label.reshape(1, 1, img_size, img_size) / 255
        all_data.append([img, label])
    np.random.shuffle(all_data)
    all_imgs = np.concatenate([a[0] for a in all_data], axis=0)
    all_labels = np.concatenate([a[1] for a in all_data], axis=0)
    n = all_imgs.shape[0]
    destpath = os.path.join(c.data_intermediate, 'ch4_{}.hdf5'.format(img_size))
    if os.path.exists(destpath):
        os.remove(destpath)
    u.save_hd5py({'images': all_imgs, 'labels': all_labels}, destpath, 5)
def threshold_img(img):
    """ Simple wrap-up function for cv2.threshold() """
    is_color = len(img.shape) == 3
    is_grey = len(img.shape) == 2

    t = threshold_value(img)
    if is_color:
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    elif is_grey:
        gray = img.copy()

    blurred = cv2.GaussianBlur(gray, (3, 3), 0)
    (_, thresh) = cv2.threshold(blurred, t * 255, 1, cv2.THRESH_BINARY_INV)
    return thresh
def clean_bg(filename):
    image = cv2.imread(filename, 0)
    new_image = np.zeros(image.shape, np.uint8)
    height, width = image.shape
    for i in range(height):
        for j in range(width):
            new_image[i, j] = image[i, j]  # max(image[i,j][0], image[i,j][1], image[i,j][2])
    ret, new_image = cv2.threshold(new_image, 180, 255, cv2.THRESH_BINARY_INV)
    border_width = 2
    new_image = new_image[border_width:height-border_width, border_width:width-border_width]
    # cv2.imshow('invImage', new_image)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()
    return new_image
def make_mask(limb, filename):
    """
    Given a limb (right or left) and a name to save to
    (in the baxter_tools/share/images/ directory), create a mask
    of any dark objects in the image from the camera and save it.
    """
    image_sub = rospy.Subscriber(
        '/cameras/' + limb + '_hand_camera/image', Image, callback)

    try:
        bridge = CvBridge()
        cv_image = bridge.imgmsg_to_cv2(img, "bgr8")
    except CvBridgeError as e:
        print(e)

    msk = cv2.threshold(img, 250, 255, cv2.THRESH_BINARY_INV)
    return msk
def recognizeDigit(digit, method=REC_METHOD_TEMPLATE_MATCHING, threshold=55):
    """
    Finds the best match for the given digit (RGB or gray color scheme)
    and returns the result and the match percentage as an integer.
    @threshold percentage of similarity
    """
    __readDigitTemplates()
    digit = digit.copy()
    if digit.shape[2] == 3:
        digit = cv2.cvtColor(digit, cv2.COLOR_RGB2GRAY)
    ret, digit = cv2.threshold(digit, 90, 255, cv2.THRESH_BINARY_INV)
    bestDigit = -1
    if method == REC_METHOD_TEMPLATE_MATCHING:
        bestMatch = None
        for i in range(len(__DIGIT_TEMPLATES)):
            template = __DIGIT_TEMPLATES[i].copy()
            if digit.shape[1] < template.shape[1]:
                template = cv2.resize(template, (digit.shape[1], digit.shape[0]))
            else:
                digit = cv2.resize(digit, (template.shape[1], template.shape[0]))
            result = cv2.matchTemplate(digit, template, cv2.TM_CCORR_NORMED)  # cv2.TM_CCOEFF_NORMED
            (_, max_val, _, max_loc) = cv2.minMaxLoc(result)
            if bestMatch is None or max_val > bestMatch:
                bestMatch = max_val
                bestDigit = i
                print("New Best Match:", bestMatch, bestDigit)

    if (bestMatch * 100) >= threshold:
        return (bestDigit, bestMatch * 100)
    return (-1, 0)
def parse_arg(argv):
    ''' parsing cli arguments '''
    parser = argparse.ArgumentParser(description='image processing: rotation and binarization.')
    parser.add_argument('-i', '--inpf', default='IMG_0531-2.jpg', help='input image file')
    parser.add_argument('-r', '--rotate', type=float, default=0,
                        help='the angle (deg) of rotation (CCW).')
    parser.add_argument('-b', '--binarize', type=int, default=0,
                        help='method of binarize. 0->THRESH_BINARY, 1->THRESH_BINARY_INV')
    return parser.parse_args(argv[1:])
def predict(url):
    global model, COOKED_PHRASES, RAW_PHRASES
    # Read image
    image = io.imread(url)
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    image = cv2.resize(image, (500, 500), interpolation=cv2.INTER_CUBIC)

    # Use Otsu to mask
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    ret, mask = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    mask = cv2.medianBlur(mask, 5)

    # Get features
    features = describe(image, mask)

    # Predict it
    result = model.predict([features])
    probability = model.predict_proba([features])[0][result][0]
    state = le.inverse_transform(result)[0]
    phrase = ''
    if 'cook' in state:
        phrase = COOKED_PHRASES[int(random.random() * len(COOKED_PHRASES))]
    elif 'raw' in state:
        phrase = RAW_PHRASES[int(random.random() * len(RAW_PHRASES))]
    return {'type': state, 'confidence': probability, 'phrase': phrase}
def make_image(self, image_path):
    img = cv2.imread(image_path, 0)
    if img is None:
        print("Image not found at '{}'".format(image_path))
        return
    img = cv2.resize(img, (self.width, self.height), interpolation=cv2.INTER_CUBIC)
    _, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV)
    self.img = np.clip(img, -1, 100)
def grabcuthm(im, hm):
    size = hm.shape
    bright = np.amax(hm)

    ret, fgd = cv2.threshold(hm, FGD_BOUND * bright, 1 * bright, cv2.THRESH_BINARY)
    fgd[1:size[0] // 2] = 0
    fgd[1:size[0], 1:size[1] // 4] = 0
    fgd[1:size[0], size[1] * 3 // 4:size[1]] = 0

    ret, pr_fgd = cv2.threshold(hm, FGD_BGD_SEP * bright, 1 * bright, cv2.THRESH_BINARY)
    pr_fgd -= fgd

    ret, bgd = cv2.threshold(hm, BGD_BOUND * bright, 1 * bright, cv2.THRESH_BINARY_INV)
    bgd[size[0] // 3:size[0]] = 0

    ret, pr_bgd = cv2.threshold(hm, FGD_BGD_SEP * bright, 1 * bright, cv2.THRESH_BINARY_INV)
    pr_bgd -= bgd

    mask = cv2.GC_BGD * bgd + cv2.GC_FGD * fgd + cv2.GC_PR_BGD * pr_bgd + cv2.GC_PR_FGD * pr_fgd
    mask = mask.astype(np.uint8, copy=False)

    bgdModel = np.zeros((1, 65), np.float64)
    fgdModel = np.zeros((1, 65), np.float64)
    rect = (0, im.shape[:2][0] // 2, im.shape[:2][1], im.shape[:2][0])

    cv2.grabCut(im, mask, rect, bgdModel, fgdModel, 5, cv2.GC_INIT_WITH_MASK)
    mask2 = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')
    return mask2
def extract_bv(image):
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    contrast_enhanced_green_fundus = clahe.apply(image)

    # applying alternate sequential filtering (3 times closing opening)
    r1 = cv2.morphologyEx(contrast_enhanced_green_fundus, cv2.MORPH_OPEN,
                          cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5)), iterations=1)
    R1 = cv2.morphologyEx(r1, cv2.MORPH_CLOSE,
                          cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5)), iterations=1)
    r2 = cv2.morphologyEx(R1, cv2.MORPH_OPEN,
                          cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11)), iterations=1)
    R2 = cv2.morphologyEx(r2, cv2.MORPH_CLOSE,
                          cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11)), iterations=1)
    r3 = cv2.morphologyEx(R2, cv2.MORPH_OPEN,
                          cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (23, 23)), iterations=1)
    R3 = cv2.morphologyEx(r3, cv2.MORPH_CLOSE,
                          cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (23, 23)), iterations=1)
    f4 = cv2.subtract(R3, contrast_enhanced_green_fundus)
    f5 = clahe.apply(f4)

    # removing very small contours through area parameter noise removal
    ret, f6 = cv2.threshold(f5, 15, 255, cv2.THRESH_BINARY)
    mask = np.ones(f5.shape[:2], dtype="uint8") * 255
    im2, contours, hierarchy = cv2.findContours(f6.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        if cv2.contourArea(cnt) <= 200:
            cv2.drawContours(mask, [cnt], -1, 0, -1)
    im = cv2.bitwise_and(f5, f5, mask=mask)
    ret, fin = cv2.threshold(im, 15, 255, cv2.THRESH_BINARY_INV)
    newfin = cv2.erode(fin, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)), iterations=1)

    # removing blobs of microaneurysms & unwanted bigger chunks, considering that they are
    # not straight lines like blood vessels and that their area falls in a given interval
    fundus_eroded = cv2.bitwise_not(newfin)
    xmask = np.ones(image.shape[:2], dtype="uint8") * 255
    x1, xcontours, xhierarchy = cv2.findContours(fundus_eroded.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in xcontours:
        shape = "unidentified"
        peri = cv2.arcLength(cnt, True)
        approx = cv2.approxPolyDP(cnt, 0.04 * peri, False)
        if len(approx) > 4 and cv2.contourArea(cnt) <= 3000 and cv2.contourArea(cnt) >= 100:
            shape = "circle"
        else:
            shape = "veins"
        if shape == "circle":
            cv2.drawContours(xmask, [cnt], -1, 0, -1)

    finimage = cv2.bitwise_and(fundus_eroded, fundus_eroded, mask=xmask)
    blood_vessels = cv2.bitwise_not(finimage)
    dilated = cv2.erode(blood_vessels, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7)), iterations=1)
    # dilated1 = cv2.dilate(blood_vessels, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)), iterations=1)
    blood_vessels_1 = cv2.bitwise_not(dilated)
    return blood_vessels_1
def hsvPassShadowRemoval(src, shadowThreshold):
    blurLevel = 3
    height, width = src.shape[:2]
    imgHSV = cv2.cvtColor(src, cv2.COLOR_RGB2HSV)

    gaussianBlur = cv2.GaussianBlur(imgHSV, (blurLevel, blurLevel), 0)
    hueImg, satImg, valImg = cv2.split(gaussianBlur)

    NSVDI = np.zeros((height, width, 1), np.uint8)
    count = height * width
    with np.errstate(divide='ignore'):
        # for i in range(0, height):
        #     for j in range(0, width):
        #         sat = int(satImg[i, j])
        #         val = int(valImg[i, j])
        #         NSVDI[i, j] = (satImg[i, j] - valImg[i, j]) / ((satImg[i, j] + valImg[i, j]) * 1.0)
        NSVDI = (satImg + valImg) / ((satImg - valImg) * 1)

    thresh = np.sum(NSVDI)
    avg = thresh / (count * 1.0)

    # for i in range(0, height):
    #     for j in range(0, width):
    #         if NSVDI[i, j] >= 0.25:
    #             hueImg[i, j] = 255
    #             satImg[i, j] = 255
    #             valImg[i, j] = 255
    #         else:
    #             hueImg[i, j] = 0
    #             satImg[i, j] = 0
    #             valImg[i, j] = 0

    if shadowThreshold is not None:
        avg = shadowThreshold

    np.where(NSVDI > avg, 255, 0)

    _, threshold = cv2.threshold(NSVDI, avg, 255, cv2.THRESH_BINARY_INV)
    output = threshold
    return output
def get_mask(name, small, pagemask, masktype):
    sgray = cv2.cvtColor(small, cv2.COLOR_RGB2GRAY)

    if masktype == 'text':
        mask = cv2.adaptiveThreshold(sgray, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                     cv2.THRESH_BINARY_INV, ADAPTIVE_WINSZ, 25)

        if DEBUG_LEVEL >= 3:
            debug_show(name, 0.1, 'thresholded', mask)

        mask = cv2.dilate(mask, box(9, 1))

        if DEBUG_LEVEL >= 3:
            debug_show(name, 0.2, 'dilated', mask)

        mask = cv2.erode(mask, box(1, 3))

        if DEBUG_LEVEL >= 3:
            debug_show(name, 0.3, 'eroded', mask)
    else:
        mask = cv2.adaptiveThreshold(sgray, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                     cv2.THRESH_BINARY_INV, ADAPTIVE_WINSZ, 7)

        if DEBUG_LEVEL >= 3:
            debug_show(name, 0.4, 'thresholded', mask)

        mask = cv2.erode(mask, box(3, 1), iterations=3)

        if DEBUG_LEVEL >= 3:
            debug_show(name, 0.5, 'eroded', mask)

        mask = cv2.dilate(mask, box(8, 2))

        if DEBUG_LEVEL >= 3:
            debug_show(name, 0.6, 'dilated', mask)

    return np.minimum(mask, pagemask)
def find_contours(self, image):
    image = qimage_to_numpy(image)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # _, thresh = cv2.threshold(gray, 150, 255, cv2.THRESH_BINARY_INV)
    # kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
    # dilated = cv2.dilate(gray, kernel, iterations=13)
    contours, hierarchy = cv2.findContours(gray, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    return contours
def fixed_threshold(image, thresh_value=120, above_thresh_assigned=255,
                    thresh_style=cv.THRESH_BINARY_INV):
    '''
    :param thresh_value: the threshold constant.
    :param thresh_style: can be any of the following:
        cv.THRESH_BINARY
        cv.THRESH_BINARY_INV
        cv.THRESH_TRUNC
        cv.THRESH_TOZERO
        cv.THRESH_TOZERO_INV
    '''
    ret, thresholded = cv.threshold(image, thresh_value, above_thresh_assigned, thresh_style)
    return thresholded
def otsu_threshold(image, above_thresh_assigned=255, thresh_style=cv.THRESH_BINARY_INV):
    ''' apply Otsu's binarization algorithm to find the optimal threshold value. '''
    ret, thresholded = cv.threshold(image, 0, above_thresh_assigned,
                                    thresh_style + cv.THRESH_OTSU)
    return {'otsu_thresh': ret, 'image': thresholded}
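Since this wrapper returns both the computed threshold and the binarized image in a dict, a call might look like the following sketch (again assuming import cv2 as cv; the file name coins.png is a placeholder):

import cv2 as cv

gray = cv.imread('coins.png', cv.IMREAD_GRAYSCALE)
result = otsu_threshold(gray)  # defaults to THRESH_BINARY_INV
print('Otsu picked threshold', result['otsu_thresh'])
binary = result['image']  # dark objects come out white on black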
def detect(self, image, mask=None):
    floatimage = np.float32(image)

    fb, fg, fr = cv2.split(floatimage)

    nonzero = fr != 0
    difference = np.zeros(fr.shape, np.float32)
    difference[nonzero] = fb[nonzero] / fr[nonzero]

    _, result = cv2.threshold(difference, Configuration.br_ratio_threshold, 1, cv2.THRESH_BINARY_INV)

    return np.uint8(result)
def detect(self, image, mask=None):
    floatimage = np.float32(image)

    fb, fg, fr = cv2.split(floatimage)

    nonzero = (fr + fb) != 0
    difference = np.zeros(fr.shape, np.float32)
    difference[nonzero] = (fb[nonzero] - fr[nonzero]) / (fb[nonzero] + fr[nonzero])

    _, result = cv2.threshold(difference, Configuration.nbr_threshold, 1, cv2.THRESH_BINARY_INV)

    return np.uint8(result)
def adaptive_binarize_py(x, block_size=5, C=33.8):
    "Works like an edge detector."
    # ADAPTIVE_THRESH_GAUSSIAN_C, ADAPTIVE_THRESH_MEAN_C
    # THRESH_BINARY, THRESH_BINARY_INV
    import cv2
    ret_imgs = opencv_wrapper(x, cv2.adaptiveThreshold,
                              [255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV,
                               block_size, C])
    return ret_imgs
def adaptiveThresholding(gray=None, neighbor=5, blur=False, k_size=3):
    if blur:
        gray = cv2.GaussianBlur(gray, (k_size, k_size), 0)
    return cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                 cv2.THRESH_BINARY_INV, neighbor, 1)
def turnBinary(self, img, *args):
    """Pass image as grayscale, else it will be converted; other args include
    'a' - add to DB, 'inv' - inverting, 's' - show"""
    # makes sure img is grayscale
    if len(img.shape) == 3:
        img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    _, img = cv2.threshold(img, 254, 255, cv2.THRESH_BINARY)
    print(img.shape)
    print("Turned Binary")
    try:
        for arg in args:
            # adds passed image to image db
            if arg == 'a':
                img_name = input("Name for image\n")
                self.pickled_dict[img_name] = img
                self.savePickledDict()
            # inverts binary img
            if arg == 'inv':
                img_name = input("Name for image\n")
                _, img = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY_INV)
            # shows img
            if arg == "s":
                cv2.imshow('img', img)
                cv2.waitKey(0)
    except:
        pass
    return img
def getThresholdedImage(self, isInverted):
    threshType = cv2.THRESH_BINARY_INV if isInverted else cv2.THRESH_BINARY
    retval, threshold = cv2.threshold(self.imageToProcess, 127, 255, threshType)
    return threshold
def getOtsuBinarizedImage(self, isInverted):
    image = self.blurImage()
    threshType = cv2.THRESH_BINARY_INV if isInverted else cv2.THRESH_BINARY
    retval, threshed = cv2.threshold(image, 0, 255, threshType + cv2.THRESH_OTSU)
    return threshed
def cutting(self, writer, fname):
    fpath = '{:s}/{:s}/{:s}'.format(self.conf.source_path, writer, fname)
    img = cv2.imread(fpath)
    if img is None:
        return

    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img_gray = cv2.GaussianBlur(img_gray, (5, 5), 0)
    ret, im_th = cv2.threshold(img_gray, 90, 255, cv2.THRESH_BINARY_INV)
    ctrs, hier = cv2.findContours(im_th.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    rects = [cv2.boundingRect(ctr) for ctr in ctrs]
    if self.debug:
        print('=> file path = {:s}'.format(fpath))

    for i, rect in enumerate(rects):
        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.rectangle(img, (rect[0], rect[1]),
                      (rect[0] + rect[2], rect[1] + rect[3]), (0, 255, 0), 3)
        cv2.putText(img, '({:d},{:d})'.format(rect[0], rect[1]),
                    (rect[0], rect[1]), font, 0.8, (0, 255, 0), 2)
        cv2.putText(img, '({:d},{:d})'.format(rect[0] + rect[2], rect[1] + rect[3]),
                    (rect[0] + rect[2], rect[1] + rect[3]), font, 0.8, (0, 255, 0), 2)

        leng = int(rect[3] * 1.6)
        pt1 = int(rect[1] + rect[3] // 2 - leng // 2)
        pt2 = int(rect[0] + rect[2] // 2 - leng // 2)
        if pt1 < 0 or pt2 < 0:
            continue
        roi = im_th[pt1:pt1 + leng, pt2:pt2 + leng]
        print("i = {:d} leng = {:.0f} pt1 = {:d} pt2 = {:d} "
              "rect[0] = {:d} rect[1] = {:d} rect[2] = {:d} rect[3] = {:d}".format(
                  i, leng, pt1, pt2, rect[0], rect[1], rect[2], rect[3]))

        from matplotlib import pyplot
        import matplotlib as mpl
        fig = pyplot.figure()
        ax = fig.add_subplot(1, 1, 1)
        imgplot = ax.imshow(roi, cmap=mpl.cm.Greys)
        imgplot.set_interpolation('nearest')
        ax.xaxis.set_ticks_position('top')
        ax.yaxis.set_ticks_position('left')
        # pyplot.show()

        roi = cv2.resize(roi, (28, 28), interpolation=cv2.INTER_AREA)
        roi = cv2.dilate(roi, (3, 3))
        # roi_hog_fd = hog(roi, orientations=9, pixels_per_cell=(14, 14), cells_per_block=(1, 1), visualise=False)
        cv2.imwrite('{:s}/{:s}/img.{:d}.{:.2f}.jpg'.format(self.conf.train_path, writer, i, time.time()), roi)
        cv2.imwrite('{:s}/img.{:d}.jpg'.format(self.conf.tmp_path, i), img)
def splitimage(image):
    dpmm = min(image.shape[0:2]) / DOCSIZE[0]
    sizethresh = SIZE_THRESH_MM * dpmm
    uprightimg = makeupright(image)
    grayimg = getgrayimage(uprightimg)

    # top line
    top = grayimg[0, :]
    sepx = [0, ]
    ret, binimg = cv2.threshold(top, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    nlabels, labels, stats, centroids = cv2.connectedComponentsWithStats(binimg)
    for i in range(1, nlabels):
        if stats[i, cv2.CC_STAT_AREA] >= sizethresh:
            sepx.append(centroids[i][1])

    # left line
    left = grayimg[:, 0]
    sepy = [0, ]
    ret, binimg = cv2.threshold(left, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    nlabels, labels, stats, centroids = cv2.connectedComponentsWithStats(binimg)
    for i in range(1, nlabels):
        if stats[i, cv2.CC_STAT_AREA] >= sizethresh:
            sepy.append(centroids[i][1])

    # divide into images
    imgs = []
    for iy in range(len(sepy)):
        for ix in range(len(sepx)):
            if iy == len(sepy) - 1:
                if ix == len(sepx) - 1:
                    # right-bottom corner
                    imgs.append(uprightimg[int(sepy[iy]):, int(sepx[ix]):])
                else:
                    # bottom end
                    imgs.append(uprightimg[int(sepy[iy]):, int(sepx[ix]):int(sepx[ix+1])])
            else:
                if ix == len(sepx) - 1:
                    # right end
                    imgs.append(uprightimg[int(sepy[iy]):int(sepy[iy+1]), int(sepx[ix]):])
                else:
                    # others
                    imgs.append(uprightimg[int(sepy[iy]):int(sepy[iy+1]), int(sepx[ix]):int(sepx[ix+1])])
    return imgs
def find_contours(img):
    '''
    :param img: (numpy array)
    :return: all possible rectangles (contours)
    '''
    img_blurred = cv2.GaussianBlur(img, (5, 5), 1)  # remove noise
    img_gray = cv2.cvtColor(img_blurred, cv2.COLOR_BGR2GRAY)  # greyscale image
    # cv2.imshow('', img_gray)
    # cv2.waitKey(0)

    # Apply Sobel filter to find the vertical edges.
    # Find vertical lines. Car plates have a high density of vertical lines.
    img_sobel_x = cv2.Sobel(img_gray, cv2.CV_8UC1, dx=1, dy=0, ksize=3, scale=1, delta=0,
                            borderType=cv2.BORDER_DEFAULT)
    # cv2.imshow('img_sobel', img_sobel_x)

    # Apply optimal threshold by using Otsu's algorithm
    retval, img_threshold = cv2.threshold(img_sobel_x, 0, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY)
    # cv2.imshow('s', img_threshold)
    # cv2.waitKey(0)

    # TODO: Try to apply AdaptiveThresh
    # Size of a pixel neighborhood that is used to calculate a threshold value for the pixel: 3, 5, 7, and so on.
    # gaus_threshold = cv2.adaptiveThreshold(img_gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 115, 1)
    # cv2.imshow('or', img)
    # cv2.imshow('gaus', gaus_threshold)
    # cv2.waitKey(0)

    # Define a structural element as a rectangle of size 17x3 (we'll use it during the morphological cleaning)
    element = cv2.getStructuringElement(shape=cv2.MORPH_RECT, ksize=(17, 3))

    # And use this structural element in a close morphological operation
    morph_img_threshold = deepcopy(img_threshold)
    cv2.morphologyEx(src=img_threshold, op=cv2.MORPH_CLOSE, kernel=element, dst=morph_img_threshold)
    # cv2.dilate(img_threshold, kernel=np.ones((1,1), np.uint8), dst=img_threshold, iterations=1)
    # cv2.imshow('Normal Threshold', img_threshold)
    # cv2.imshow('Morphological Threshold based on rect. mask', morph_img_threshold)
    # cv2.waitKey(0)

    # Find contours that contain possible plates (in hierarchical relationship)
    contours, hierarchy = cv2.findContours(morph_img_threshold,
                                           mode=cv2.RETR_EXTERNAL,  # retrieve the external contours
                                           method=cv2.CHAIN_APPROX_NONE)  # all pixels of each contour

    plot_intermediate_steps = False
    if plot_intermediate_steps:
        plot(plt, 321, img, "Original image")
        plot(plt, 322, img_blurred, "Blurred image")
        plot(plt, 323, img_gray, "Grayscale image", cmap='gray')
        plot(plt, 324, img_sobel_x, "Sobel")
        plot(plt, 325, img_threshold, "Threshold image")
        # plot(plt, 326, morph_img_threshold, "After Morphological filter")
        plt.tight_layout()
        plt.show()

    return contours
async def avatart(self, ctx, *args):
    """Make a wordcloud in the shape of your avatar.

    usage: !avatart <invert> <bgcolor>
    """
    fmt = "Making artwork {}, hold your horses!"
    msg = await self.bot.say(fmt.format(ctx.message.author.mention))
    fin_img = path.join(self.d, self.e, "fin.png")

    # this whole block is lol
    if len(args) >= 2:
        if args[0] == "yes" or args[0] == "true" or args[0] == "invert":
            thresh = cv2.THRESH_BINARY_INV
        else:
            thresh = cv2.THRESH_BINARY
        try:
            if Color(args[1]):
                bg_colour = args[1]
        except:
            bg_colour = "white"
    elif len(args) == 1:
        try:
            if Color(args[0]):
                bg_colour = args[0]
        except:
            bg_colour = "white"
        if args[0] == "yes" or args[0] == "true" or args[0] == "invert":
            thresh = cv2.THRESH_BINARY_INV
        else:
            thresh = cv2.THRESH_BINARY
    else:
        thresh = cv2.THRESH_BINARY
        bg_colour = "white"

    ava = ctx.message.author.avatar_url  # grab avatar URL
    try:
        if ava == "":
            print("there's no avatar for this user: " + str(ctx.message.author))
            await self.bot.say("```I can't make avatar art without an avatar you silly goose. But it's ok, I have something special for you.```")
            img = cv2.imread(path.join(self.d, self.e, "default_avatar.jpg"), 1)
        else:
            img_data = requests.get(ava, stream=True).content  # dl from dat url
            img = cv2.imdecode(np.frombuffer(img_data, np.uint8), 1)  # convert from string buffer to uint8

        img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # grayscale that motha
        ret, img_bw = cv2.threshold(img_gray, 127, 255, thresh)  # threshold values
        scaled = cv2.resize(img_bw, (1024, 1024), interpolation=cv2.INTER_LINEAR)
        word = self.wordsFromDB(ctx.message.author)  # retrieve words from DB
        text = " ".join(word)
        avatar_mask = np.array(scaled)  # create mask
        wc = WordCloud(background_color=bg_colour, max_words=20000,
                       stopwords=self.STOPWORDS, mask=avatar_mask)
        wc.generate(text)
        wc.to_file(fin_img)  # save masked wordart to file
        await self.bot.send_file(ctx.message.channel, fin_img, content=ctx.message.author.mention)
        await self.bot.delete_message(msg)
    except:
        await self.bot.say("```Something has gone horribly wrong.```")

# only refresh cache if an authorized ID
def count_fingers(img):
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # Otsu's thresholding after Gaussian filtering
    img = cv2.GaussianBlur(img, (5, 5), 0)
    ret, mask = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)

    cv2.imshow("Threshold", mask)

    (_, cnts, _) = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    list_far = []
    list_end = []
    if cnts:
        areas = [cv2.contourArea(c) for c in cnts]
        max_index = np.argmax(areas)
        cnt = cnts[max_index]

        M = cv2.moments(cnt)
        cx = int(M['m10'] / M['m00'])
        cy = int(M['m01'] / M['m00'])

        hull1 = cv2.convexHull(cnt)
        hull2 = cv2.convexHull(cnt, returnPoints=False)

        try:
            defects = cv2.convexityDefects(cnt, hull2)
        except Exception as e:
            defects = None
            print(e)

        counter = 0
        if defects is not None:
            for i in range(defects.shape[0]):
                s, e, f, d = defects[i, 0]
                # start = tuple(cnt[s][0])
                end = tuple(cnt[e][0])
                far = tuple(cnt[f][0])
                if d < 20000:
                    continue

                if far[1] >= (cy + 40):
                    continue

                diff1 = abs(end[0] - far[0])
                if diff1 > 100:
                    continue

                cv2.line(img, end, far, (0, 0, 0), 2, 8)
                cv2.imshow("hand", img)
                cv2.waitKey(1)
                list_far.append(far)
                list_end.append(end)
                counter += 1

    return mask, counter, hull1, (cx, cy), list_far, list_end
def checkButton(self, img, x1, y1, x2, y2):
    btn1 = img[y1:y2, x1:x2]
    btn1 = cv2.cvtColor(btn1, cv2.COLOR_BGR2GRAY)

    if self.thresh_change_trigger:
        ret, mask = cv2.threshold(btn1, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
        self.thresh_val.setText(str(ret))
        self.THRESH = ret
    else:
        ret, mask = cv2.threshold(btn1, self.THRESH, 255, cv2.THRESH_BINARY_INV)

    try:
        (_, cnts, _) = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    except Exception:
        (cnts, _) = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    ci = 0
    max_area = 0
    if cnts:
        for i in range(len(cnts)):
            cnt = cnts[i]
            area = cv2.contourArea(cnt)
            if area > max_area:
                max_area = area
                ci = i
        cnt = cnts[ci]
    else:
        cnt = None

    self.flags.isSet_prev = self.flags.isSet_cur
    if cnt is not None:
        cv2.rectangle(img, (x1, y1), (x2, y2), (0, 0, 0), 1)
        hull = cv2.convexHull(cnt)
        cv2.drawContours(btn1, [hull], 0, (0, 0, 255), 1)
        self.flags.isSet_cur = True
    else:
        cv2.rectangle(img, (x1, y1), (x2, y2), (188, 188, 137), 1)
        self.flags.isSet_cur = False
    return img
def count_fingers(hand_frame):
    hand_frame = cv2.cvtColor(hand_frame, cv2.COLOR_BGR2GRAY)

    # Otsu's thresholding after Gaussian filtering
    hand_frame = cv2.GaussianBlur(hand_frame, (5, 5), 0)
    ret, mask = cv2.threshold(hand_frame, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)

    (cnts, _) = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    list_far = []
    list_end = []
    if cnts:
        areas = [cv2.contourArea(c) for c in cnts]
        max_index = np.argmax(areas)
        cnt = cnts[max_index]

        M = cv2.moments(cnt)
        cx = int(M['m10'] / M['m00'])
        cy = int(M['m01'] / M['m00'])

        hull1 = cv2.convexHull(cnt)
        hull2 = cv2.convexHull(cnt, returnPoints=False)

        try:
            defects = cv2.convexityDefects(cnt, hull2)
        except Exception as e:
            defects = None
            print(e)

        counter = 0
        if defects is not None:
            for i in range(defects.shape[0]):
                s, e, f, d = defects[i, 0]
                start = tuple(cnt[s][0])
                end = tuple(cnt[e][0])
                far = tuple(cnt[f][0])
                if d < 20000:
                    continue

                if far[1] >= (cy + 40):
                    continue
                else:
                    pass

                list_far.append(far)
                list_end.append(end)
                counter += 1

    return mask, counter, hull1, (cx, cy), list_far, list_end
def main():
    while True:
        ret, img = cap.read()
        img = cv2.resize(img, None, fx=1.3, fy=1.2, interpolation=cv2.INTER_LINEAR)
        btn1 = img[0:100, 250:350]
        btn1 = cv2.cvtColor(btn1, cv2.COLOR_BGR2GRAY)
        ret, mask = cv2.threshold(btn1, 150, 255, cv2.THRESH_BINARY_INV)

        (cnts, _) = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        res = cv2.bitwise_and(btn1, btn1, mask=mask)
        (cnts, _) = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

        ci = 0
        max_area = 0
        if cnts:
            for i in range(len(cnts)):
                cnt = cnts[i]
                area = cv2.contourArea(cnt)
                if area > max_area:
                    max_area = area
                    ci = i
            cnt = cnts[ci]
        else:
            cnt = None

        # cv2.drawContours(btn1, cnt, -1, (0, 255, 0), 1)
        font = cv2.FONT_HERSHEY_SIMPLEX
        if cnt is not None:
            cv2.rectangle(img, (250, 0), (350, 100), (0, 0, 0), 2)
            hull = cv2.convexHull(cnt)
            cv2.drawContours(btn1, [hull], 0, (0, 0, 255), 2)
            cv2.putText(img, "Btn1", (0, 50), font, 1, (255, 0, 0), 2, 1)
        else:
            cv2.rectangle(img, (250, 0), (350, 100), (188, 188, 137), 2)

        cv2.imshow('Img', img)
        # cv2.imshow('btn1', mask)
        if cv2.waitKey(20) & 0xff == ord('q'):
            cv2.imwrite('btn1.jpg', btn1)
            cv2.imwrite('mask.jpg', mask)
            break

    cap.release()
    cv2.destroyAllWindows()
def segment(self):
    self.im_gray = cv2.medianBlur(self.im_gray, 5)

    # Apply adaptive threshold with binary_inv
    thresh = cv2.adaptiveThreshold(self.im_gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                   cv2.THRESH_BINARY_INV, 11, 2)

    # apply some dilation and erosion to join the gaps
    thresh = cv2.dilate(thresh, None, iterations=3)
    thresh = cv2.erode(thresh, None, iterations=2)

    # finding contours
    im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    '''
    cropped is a dictionary with (cx, cy) centroid tuples as keys, and cropped images as values
    centroids is a list of the same centroid tuples, (cx, cy)
    - This was done because it was not possible to sort the dictionary directly
      using tuples as keys with the sort(dict) function.
    - Instead, (cx, cy) was stored in the centroids list, and the list in turn
      was sorted using centroids.sort().
    - The list is then iterated upon to get tuples in order...
    - Each tuple iterated upon acts as a key for the dictionary, fetching the
      cropped images in order
    '''
    cropped = {(0, 0): '0'}
    centroids = [(0, 0)]

    for cnt in contours:
        x, y, w, h = cv2.boundingRect(cnt)

        # finding centroid coordinates, so that they can be the basis of sorting cropped images
        M = cv2.moments(cnt)
        cx = int(M['m10'] / M['m00'])
        cy = int(M['m01'] / M['m00'])

        # storing the centroid tuple and cropped image in the dictionary
        cropped[(cx, cy)] = self.im_gray[y:y + h, x:x + w]

        # inserting centroid tuples into a list
        centroids.append((cx, cy))

    # since (0, 0) was only a placeholder
    del cropped[(0, 0)]
    centroids.remove((0, 0))

    # sorting the centroid list
    centroids.sort()

    segments = []
    for c in centroids:
        segments.append(cropped[c])
    return segments
def findCageOption(self):
    """Finds the cage option in the fishing guild when right-clicking on fish bubbles"""
    x1 = 5
    y1 = 25
    x2 = 767
    y2 = 524
    rs_window = Screenshot.shoot(x1, y1, x2, y2)
    # cv2.imshow('img', rs_window)
    # cv2.waitKey(0)
    rsc = rs_window.copy()
    # keeps only pure black and white
    ret, thresh1 = cv2.threshold(rsc, 0, 255, cv2.THRESH_BINARY)
    # inverts so that only the black colors show as white
    ret, thresh1 = cv2.threshold(thresh1, 0, 255, cv2.THRESH_BINARY_INV)
    _, contours, h = cv2.findContours(thresh1, 1, 2)
    for cnt in contours:
        # looks for the biggest square
        if cv2.contourArea(cnt) <= 1695.0:
            continue
        # checks contour sides
        approx = cv2.approxPolyDP(cnt, 0.01 * cv2.arcLength(cnt, True), True)
        # draws only if it's squared
        if len(approx) == 4:
            print("square of {}".format(cv2.contourArea(cnt)))
            # cv2.drawContours(rs_window, [cnt], 0, (255, 255, 255), -1)
            # get geometry of approx
            # add rs coords
            x, y, w, h = cv2.boundingRect(cnt)
            # combines rs_window coords
            x += x1
            y += y1
            # screenshot of the option menu on the play window
            img = Screenshot.shoot(x, y, x + w, y + h)
            ret, thresh1 = cv2.threshold(img, 254, 255, cv2.THRESH_BINARY)
            # loads image from db
            img_from_dict = self.idb.pickled_dict['cage']
            # finds a match
            from modules import Match
            # runs func when match is found; returns true to keep looking for template
            if Match.images(thresh1, img_from_dict, x, y, self.doInMatched):
                # keep looking for other bubbles
                return 1
            else:
                # found 'cage'
                return 0
    # in case the options menu is against an edge
    return 1
def __findLine(self):
    self.__grabImage()

    if self.currentImage is None:  # grabbing image failed
        return -2.0

    # Convert to grayscale
    img = cv2.cvtColor(self.currentImage, cv2.COLOR_BGR2GRAY)

    # Blur to reduce noise
    img = cv2.medianBlur(img, 25)

    # Do thresholding
    h, img = cv2.threshold(img, self.thresh, self.maxValue, cv2.THRESH_BINARY_INV)
    img = cv2.blur(img, (2, 2))

    # Make image smaller
    img = cv2.resize(img, (self.horizontalRes, self.verticalRes))
    # org_img = cv2.resize(org_img, (self.horizontalRes, self.verticalRes))

    # Create skeleton
    size = np.size(img)
    skel = np.zeros(img.shape, np.uint8)
    element = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
    done = False
    while not done:
        eroded = cv2.erode(img, element)
        temp = cv2.dilate(eroded, element)
        temp = cv2.subtract(img, temp)
        skel = cv2.bitwise_or(skel, temp)
        img = eroded.copy()
        zeros = size - cv2.countNonZero(img)
        if zeros == size:
            done = True

    # Do line detection
    lines = cv2.HoughLinesP(skel, 1, np.pi / 180, 2,
                            self.hough_minLineLength, self.hough_maxLineGap)

    # get minimum and maximum x-coordinate from lines
    x_min = self.horizontalRes + 1.0
    x_max = -1.0
    if lines is not None and len(lines[0]) > 0:
        for x1, y1, x2, y2 in lines[0]:
            x_min = min(x_min, x1, x2)
            x_max = max(x_max, x1, x2)
            # cv2.line(org_img, (x1, y1), (x2, y2), (0, 255, 0), 2)

    # write output visualization
    # cv2.imwrite("output-img.png", org_img)

    # find the middle point x of the line and return
    # return -1.0 if no lines found
    if x_max == -1.0 or x_min == (self.horizontalRes + 1.0):
        return -1.0  # no line found
    else:
        return (x_min + x_max) / 2.0
def binaryMask(frame, x0, y0, width, height):
    global guessGesture, visualize, mod, lastgesture, saveImg
    cv2.rectangle(frame, (x0, y0), (x0 + width, y0 + height), (0, 255, 0), 1)
    roi = frame[y0:y0 + height, x0:x0 + width]

    gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (5, 5), 2)
    # blur = cv2.bilateralFilter(roi, 9, 75, 75)

    th3 = cv2.adaptiveThreshold(blur, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                cv2.THRESH_BINARY_INV, 11, 2)
    ret, res = cv2.threshold(th3, minValue, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    # ret, res = cv2.threshold(blur, minValue, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

    if saveImg == True:
        saveROIImg(res)
    elif guessGesture == True:
        retgesture = myNN.guessGesture(mod, res)
        if lastgesture != retgesture:
            lastgesture = retgesture
            # print lastgesture

            ## Checking for only PUNCH gesture here
            ## Run this app in Prediction Mode and keep Chrome browser on focus with Internet Off
            ## And have fun :) with Dino
            if lastgesture == 3:
                jump = ''' osascript -e 'tell application "System Events" to key code 49' '''
                # jump = ''' osascript -e 'tell application "System Events" to key down (49)' '''
                os.system(jump)
                print(myNN.output[lastgesture] + "= Dino JUMP!")
                # time.sleep(0.01)
                # guessGesture = False
    elif visualize == True:
        layer = int(input("Enter which layer to visualize "))
        cv2.waitKey(1)
        myNN.visualizeLayers(mod, res, layer)
        visualize = False

    return res

#%%