The following 14 code examples, extracted from open-source Python projects, illustrate how cv2.THRESH_TOZERO is used in practice. Each example assumes that cv2 (and numpy as np, where used) has been imported at module level.
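Before the examples, it helps to recall what the flag does: with cv2.THRESH_TOZERO, threshold() keeps every pixel strictly above the threshold unchanged and sets everything else to zero; the maxval argument is ignored for this mode. A minimal sketch, with made-up values:

import cv2
import numpy as np

src = np.array([[10, 50, 100, 150, 200]], dtype=np.uint8)

# Pixels > 100 survive unchanged; pixels <= 100 become 0.
_, dst = cv2.threshold(src, 100, 255, cv2.THRESH_TOZERO)
print(dst)  # [[  0   0   0 150 200]]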
def EdgeDetection(img):
    img = cv2.fastNlMeansDenoising(img, None, 3, 7, 21)
    _, img = cv2.threshold(img, 30, 255, cv2.THRESH_TOZERO)
    denoise_img = img
    laplacian = cv2.Laplacian(img, cv2.CV_64F)
    sobelx = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=5)  # x
    sobely = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=3)  # y
    canny = cv2.Canny(img, 100, 200)
    # Three return values is the OpenCV 3.x API; OpenCV 4.x returns
    # only (contours, hierarchy).
    contour_image, contours, hierarchy = cv2.findContours(img, cv2.RETR_TREE,
                                                          cv2.CHAIN_APPROX_SIMPLE)
    return {"denoise": denoise_img, "laplacian": laplacian, "canny": canny,
            "sobely": sobely, "sobelx": sobelx, "contour": contour_image}

# GrayScale Image Convertor
# https://extr3metech.wordpress.com
def MyDenoiseSobely(path):
    # ToGrayImage and MyDenoise are helpers defined elsewhere in the
    # same project.
    img_gray = ToGrayImage(path)
    img_mydenoise = MyDenoise(img_gray, 5)
    img_denoise = cv2.fastNlMeansDenoising(img_mydenoise, None, 3, 7, 21)
    _, img_thre = cv2.threshold(img_denoise, 100, 255, cv2.THRESH_TOZERO)
    sobely = cv2.Sobel(img_thre, cv2.CV_64F, 0, 1, ksize=3)
    return sobely
def extract_color(src, h_th_low, h_th_up, s_th, v_th):
    hsv = cv2.cvtColor(src, cv2.COLOR_BGR2HSV)
    h, s, v = cv2.split(hsv)
    if h_th_low > h_th_up:
        # Hue range wraps around (e.g. red): keep h above h_th_low OR
        # at/below h_th_up.
        ret, h_dst_1 = cv2.threshold(h, h_th_low, 255, cv2.THRESH_BINARY)
        ret, h_dst_2 = cv2.threshold(h, h_th_up, 255, cv2.THRESH_BINARY_INV)
        dst = cv2.bitwise_or(h_dst_1, h_dst_2)
    else:
        # Band-pass on hue: TOZERO zeroes h <= h_th_low, TOZERO_INV zeroes
        # h > h_th_up, then BINARY turns the surviving band into a mask.
        ret, dst = cv2.threshold(h, h_th_low, 255, cv2.THRESH_TOZERO)
        ret, dst = cv2.threshold(dst, h_th_up, 255, cv2.THRESH_TOZERO_INV)
        ret, dst = cv2.threshold(dst, 0, 255, cv2.THRESH_BINARY)
    ret, s_dst = cv2.threshold(s, s_th, 255, cv2.THRESH_BINARY)
    ret, v_dst = cv2.threshold(v, v_th, 255, cv2.THRESH_BINARY)
    dst = cv2.bitwise_and(dst, s_dst)
    dst = cv2.bitwise_and(dst, v_dst)
    return dst
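A hypothetical call (the image path and thresholds are invented for illustration, not from the source project) that extracts a green mask via the band-pass branch:

img = cv2.imread('scene.png')  # any BGR image
# OpenCV hue runs 0..179; roughly 40..80 covers green.
mask = extract_color(img, 40, 80, 60, 60)
cv2.imwrite('green_mask.png', mask)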
def makeNormalizedColorChannels(image, thresholdRatio=10.):
    """Creates a version of the (3-channel color) input image in which each of
    the (4) channels is normalized. Implements color opponencies as per
    Itti et al. (1998).

    Arguments:
        image          : input image (3 color channels)
        thresholdRatio : the threshold below which to set all color values
                         to zero.
    Returns:
        an output image with four normalized color channels for red, green,
        blue and yellow.
    """
    # intensity() and logger are defined elsewhere in the same module.
    intens = intensity(image)
    threshold = intens.max() / thresholdRatio
    logger.debug("Threshold: %d", threshold)
    r, g, b = cv2.split(image)
    # Thresholded in place via the dst= output argument; maxval is ignored
    # for THRESH_TOZERO, so 0.0 is fine here.
    cv2.threshold(src=r, dst=r, thresh=threshold, maxval=0.0, type=cv2.THRESH_TOZERO)
    cv2.threshold(src=g, dst=g, thresh=threshold, maxval=0.0, type=cv2.THRESH_TOZERO)
    cv2.threshold(src=b, dst=b, thresh=threshold, maxval=0.0, type=cv2.THRESH_TOZERO)
    R = r - (g + b) / 2
    G = g - (r + b) / 2
    B = b - (g + r) / 2
    Y = (r + g) / 2 - cv2.absdiff(r, g) / 2 - b
    # Negative values are set to zero.
    cv2.threshold(src=R, dst=R, thresh=0., maxval=0.0, type=cv2.THRESH_TOZERO)
    cv2.threshold(src=G, dst=G, thresh=0., maxval=0.0, type=cv2.THRESH_TOZERO)
    cv2.threshold(src=B, dst=B, thresh=0., maxval=0.0, type=cv2.THRESH_TOZERO)
    cv2.threshold(src=Y, dst=Y, thresh=0., maxval=0.0, type=cv2.THRESH_TOZERO)
    image = cv2.merge((R, G, B, Y))
    return image
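The four trailing calls rely on THRESH_TOZERO also working on floating-point arrays, where it clamps negative opponency values to zero. In isolation, with made-up values:

x = np.array([[-0.5, 0.0, 1.5]], dtype=np.float32)
_, y = cv2.threshold(x, 0.0, 0.0, cv2.THRESH_TOZERO)
print(y)  # [[0.   0.   1.5]]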
def getDarkColorPercent(image):
    height = np.size(image, 0)
    width = np.size(image, 1)
    imgSize = width * height
    # maxval (-1 here) is ignored by THRESH_TOZERO; pixels <= 100 become 0.
    result = cv2.threshold(image, 100, -1, cv2.THRESH_TOZERO)[1]
    nonzero = cv2.countNonZero(result)
    if nonzero > 0:
        return (imgSize - nonzero) / float(imgSize)
    else:
        return 0
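A quick sanity check of the idea on a synthetic image (values invented): THRESH_TOZERO zeroes the dark pixels, countNonZero counts the bright ones, and the dark fraction follows.

gray = np.full((4, 4), 50, dtype=np.uint8)  # all-dark image
gray[0, 0] = 200                            # one bright pixel
print(getDarkColorPercent(gray))            # (16 - 1) / 16 = 0.9375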
def EdgeDetection(img):
    # img = cv2.medianBlur(img, 5)
    img = cv2.fastNlMeansDenoising(img, None, 3, 7, 21)
    _, img = cv2.threshold(img, 30, 255, cv2.THRESH_TOZERO)
    denoise_img = img
    # print(img)
    # cv2.imwrite("Denoise.jpg", img)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()

    # convolve with the appropriate kernels
    laplacian = cv2.Laplacian(img, cv2.CV_64F)
    sobelx = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=5)  # x
    sobely = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=3)  # y
    # sobel2y = cv2.Sobel(sobely, cv2.CV_64F, 0, 1, ksize=3)
    # sobelxy = cv2.Sobel(img, cv2.CV_64F, 1, 1, ksize=5)
    canny = cv2.Canny(img, 100, 200)
    # OpenCV 3.x API (see the note in the first example).
    contour_image, contours, hierarchy = cv2.findContours(img, cv2.RETR_TREE,
                                                          cv2.CHAIN_APPROX_SIMPLE)
    # print(canny)
    # cv2.imwrite('laplacian.jpg', laplacian)
    # cv2.imwrite('sobelx.jpg', sobelx)
    # cv2.imwrite('sobely.jpg', sobely)
    # cv2.imwrite('sobelxy.jpg', sobelxy)
    # cv2.imwrite('canny.jpg', canny)
    # plt.subplot(3,2,1), plt.imshow(img, cmap='gray')
    # plt.title('Original'), plt.xticks([]), plt.yticks([])
    # plt.subplot(3,2,2), plt.imshow(laplacian, cmap='gray')
    # plt.title('Laplacian'), plt.xticks([]), plt.yticks([])
    # plt.subplot(3,2,3), plt.imshow(sobelx, cmap='gray')
    # plt.title('Sobel X'), plt.xticks([]), plt.yticks([])
    # plt.subplot(3,2,4), plt.imshow(sobely, cmap='gray')
    # plt.title('Sobel Y'), plt.xticks([]), plt.yticks([])
    # plt.subplot(3,2,4), plt.imshow(sobelxy, cmap='gray')
    # plt.title('Sobel XY'), plt.xticks([]), plt.yticks([])
    # plt.subplot(3,2,5), plt.imshow(canny, cmap='gray')
    # plt.title('Canny'), plt.xticks([]), plt.yticks([])
    # plt.show()
    # return {"denoise": img}
    return {"denoise": denoise_img, "laplacian": laplacian, "canny": canny,
            "sobely": sobely, "sobelx": sobelx, "contour": contour_image}
def trackObjects(self):
    for area in self.trackedAreasList:
        # Template matching
        gray = cv2.cvtColor(self.processedFrame, cv2.COLOR_BGR2GRAY)
        templ = area.getGrayStackAve()
        cc = cv2.matchTemplate(gray, templ, cv2.TM_CCOEFF_NORMED)
        cc = cc * cc * cc * cc  # sharpen the correlation peak
        _, cc = cv2.threshold(cc, 0.1, 0, cv2.THRESH_TOZERO)
        cc8 = cv2.normalize(cc, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U)
        mask = np.zeros_like(cc8)

        # Search for the match within the template region
        mcorn = area.getEnlargedCorners(0)  # if not 0, enlarge the search
        cv2.rectangle(mask, mcorn[0], mcorn[1], 255, -1)
        _, _, _, mx = cv2.minMaxLoc(cc8, mask)

        # kp = area.getKalmanPredict()
        # area.updateWindow(kp)
        # area.setTemplate(self.processedFrame)

        # Prevent large spatial jumps
        (c, r, _, _) = area.getcrwh()
        jump = 10
        if abs(c - mx[0]) < jump and abs(r - mx[1]) < jump:
            # area.setKalmanCorrect(mx)
            area.updateWindow(mx)
        else:
            # area.setKalmanCorrect((c, r))
            area.updateWindow((c, r))
        area.setTemplate(self.processedFrame)

        # Show the template stack
        if self.showTemplate is True:
            cv2.imshow('Stack: ' + str(area), area.getStack())
        else:
            try:
                cv2.destroyWindow('Stack: ' + str(area))
            except cv2.error:
                pass

        # Show the matching results
        if self.showMatch is True:
            cv2.rectangle(cc8, mcorn[0], mcorn[1], 255, 1)
            cv2.circle(cc8, mx, 5, 255, 1)
            cv2.imshow('Match: ' + str(area), cc8)
        else:
            try:
                cv2.destroyWindow('Match: ' + str(area))
            except cv2.error:
                pass

        # Draw the tracked area on the image
        corn = area.getCorners()
        cv2.rectangle(self.workingFrame, corn[0], corn[1], (0, 255, 0), 1)

        # self.showFrame()
        # raw_input('wait')
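Here THRESH_TOZERO discards weak correlation scores before normalization; raising the match map to the fourth power first sharpens the peak. The same two steps in isolation, with made-up scores:

cc = np.array([[0.05, 0.3, 0.9]], dtype=np.float32)
cc = cc ** 4                                      # sharpen the peak
_, cc = cv2.threshold(cc, 0.1, 0, cv2.THRESH_TOZERO)
print(cc)  # [[0.     0.     0.6561]]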
def image_postprocessing_depth(gray, depth, t_size_y, t_size_x, feedback, t):
    if feedback:
        cv2.imwrite('feedback/image_' + str(t) + '_gray_0_input.png', gray)
        cv2.imwrite('feedback/image_' + str(t) + '_depth_0_input.png', depth)

    # resize normal image
    gray = cv2.resize(gray, (t_size_y, t_size_x))
    if feedback:
        cv2.imwrite('feedback/image_' + str(t) + '_gray_1_resize.png', gray)

    # resize depth image
    depth = cv2.resize(depth, (t_size_y, t_size_x))
    if feedback:
        cv2.imwrite('feedback/image_' + str(t) + '_depth_1_resize.png', depth)

    # cut normal image (integer division for Python 3)
    gray = gray[t_size_y // 2 - 1:-1, :]
    if feedback:
        cv2.imwrite('feedback/image_' + str(t) + '_gray_2_cut.png', gray)

    # cut depth image
    depth = depth[t_size_y // 2 - 1:-1, :]
    if feedback:
        cv2.imwrite('feedback/image_' + str(t) + '_depth_2_cut.png', depth)

    # threshold filter for the grayscale image
    ret, gray = cv2.threshold(gray, 160, 255, cv2.THRESH_BINARY)
    if feedback:
        cv2.imwrite('feedback/image_' + str(t) + '_gray_3_flt.png', gray)

    # custom filter for the depth image: invert so near pixels are bright,
    # then zero out everything below the cutoff
    depth = cv2.bitwise_not(depth)
    ret, depth = cv2.threshold(depth, 165, 255, cv2.THRESH_TOZERO)
    if feedback:
        cv2.imwrite('feedback/image_' + str(t) + '_depth_3_flt_inv.png', depth)

    height, width = depth.shape

    # subtract the lowest nonzero gray value
    minval = np.min(depth[np.nonzero(depth)])
    depth[np.nonzero(depth)] -= minval
    if feedback:
        cv2.imwrite('feedback/image_' + str(t) + '_depth_4_off.png', depth)

    # return the added image
    result = cv2.add(gray, depth)
    if feedback:
        cv2.imwrite('feedback/image_' + str(t) + '_final.png', result)
    return result

# calculates the gray-scale image from ViZDoom
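The invert-then-threshold pair keeps only nearby geometry: after cv2.bitwise_not, small depth values (near objects) become large, and THRESH_TOZERO discards everything beyond the cutoff. With invented depth values:

depth = np.array([[20, 90, 200]], dtype=np.uint8)  # near, mid, far
inv = cv2.bitwise_not(depth)                       # [[235 165  55]]
_, near = cv2.threshold(inv, 165, 255, cv2.THRESH_TOZERO)
print(near)  # [[235   0   0]]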