The following code examples, extracted from open-source Python projects, illustrate how to use cv2.Canny().
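As a quick orientation before the project samples, here is a minimal sketch of the typical call pattern. The file name 'image.png' and the 100/200 thresholds are illustrative placeholders, not values taken from any project below.

import cv2

# Canny works on a single-channel 8-bit image; smoothing first suppresses noise edges.
img = cv2.imread('image.png', cv2.IMREAD_GRAYSCALE)  # placeholder input path
blurred = cv2.GaussianBlur(img, (5, 5), 0)
# The two arguments after the image are the lower and upper hysteresis thresholds.
edges = cv2.Canny(blurred, 100, 200)
cv2.imwrite('edges.png', edges)

Most of the samples below follow this same blur-then-Canny pattern before handing the edge map to cv2.findContours() or a Hough transform.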
def __bound_contours(roi):
    """
    Returns a modified copy of roi (non-destructive) and the rectangles found by the algorithm.
    @roi region of interest to find contours
    @return (roi, rects)
    """
    roi_copy = roi.copy()
    roi_hsv = cv2.cvtColor(roi, cv2.COLOR_RGB2HSV)
    # filter black color
    mask1 = cv2.inRange(roi_hsv, np.array([0, 0, 0]), np.array([180, 255, 125]))
    mask1 = cv2.morphologyEx(mask1, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)))
    mask1 = cv2.Canny(mask1, 100, 300)
    mask1 = cv2.GaussianBlur(mask1, (1, 1), 0)
    mask1 = cv2.Canny(mask1, 100, 300)
    # mask1 = cv2.morphologyEx(mask1, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3)))

    # Find contours for detected portion of the image
    im2, cnts, hierarchy = cv2.findContours(mask1.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:5]  # get largest five contour area
    rects = []
    for c in cnts:
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.02 * peri, True)
        x, y, w, h = cv2.boundingRect(approx)
        if h >= 15:  # if height is enough
            # create rectangle for bounding
            rect = (x, y, w, h)
            rects.append(rect)
            cv2.rectangle(roi_copy, (x, y), (x + w, y + h), (0, 255, 0), 1)
    return (roi_copy, rects)
def find_squares(img):
    img = cv2.GaussianBlur(img, (5, 5), 0)
    squares = []
    for gray in cv2.split(img):
        for thrs in range(0, 255, 26):
            if thrs == 0:
                bin = cv2.Canny(gray, 0, 50, apertureSize=5)
                bin = cv2.dilate(bin, None)
            else:
                retval, bin = cv2.threshold(gray, thrs, 255, cv2.THRESH_BINARY)
            bin, contours, hierarchy = cv2.findContours(bin, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
            for cnt in contours:
                cnt_len = cv2.arcLength(cnt, True)
                cnt = cv2.approxPolyDP(cnt, 0.02 * cnt_len, True)
                if len(cnt) == 4 and cv2.contourArea(cnt) > 1000 and cv2.isContourConvex(cnt):
                    cnt = cnt.reshape(-1, 2)
                    max_cos = np.max([angle_cos(cnt[i], cnt[(i + 1) % 4], cnt[(i + 2) % 4]) for i in range(4)])
                    if max_cos < 0.1:
                        squares.append(cnt)
    return squares
def remove_borders(image):
    ratio = image.shape[0] / 500.0
    orig = image.copy()
    image = resize(image, height=500)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (5, 5), 0)
    edged = cv2.Canny(gray, 75, 200)
    _, cnts, _ = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    cv2.imshow('edged', edged)
    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
    screenCnt = None
    for c in cnts:
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.02 * peri, True)
        if len(approx) == 4:
            screenCnt = approx
            break
    if screenCnt is not None and len(screenCnt) > 0:
        cv2.drawContours(image, [screenCnt], -1, (0, 255, 0), 2)
        return four_point_transform(orig, screenCnt.reshape(4, 2) * ratio)
    return orig
def find_lines(img):
    edges = cv2.Canny(img, 100, 200)
    threshold = 60
    minLineLength = 10
    lines = cv2.HoughLinesP(edges, 1, np.pi / 180, threshold,
                            minLineLength=minLineLength, maxLineGap=20)
    if lines is None or len(lines) == 0:
        return
    #print lines
    for line in lines[0]:
        #print line
        cv2.line(img, (line[0], line[1]), (line[2], line[3]), (0, 255, 0), 2)
    cv2.imwrite("line_edges.jpg", edges)
    cv2.imwrite("lines.jpg", img)
def find_squares(img):
    img = cv2.GaussianBlur(img, (5, 5), 0)
    squares = []
    for gray in cv2.split(img):
        for thrs in range(0, 255, 26):
            if thrs == 0:
                bin = cv2.Canny(gray, 0, 50, apertureSize=5)
                bin = cv2.dilate(bin, None)
            else:
                _retval, bin = cv2.threshold(gray, thrs, 255, cv2.THRESH_BINARY)
            contours, _hierarchy = find_contours(bin, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
            for cnt in contours:
                x, y, w, h = cv2.boundingRect(cnt)
                cnt_len = cv2.arcLength(cnt, True)
                cnt = cv2.approxPolyDP(cnt, 0.02 * cnt_len, True)
                area = cv2.contourArea(cnt)
                if len(cnt) == 4 and 20 < area < 1000 and cv2.isContourConvex(cnt):
                    cnt = cnt.reshape(-1, 2)
                    max_cos = np.max([angle_cos(cnt[i], cnt[(i + 1) % 4], cnt[(i + 2) % 4]) for i in range(4)])
                    if max_cos < 0.1:
                        if (1 - (float(w) / float(h)) <= 0.07 and 1 - (float(h) / float(w)) <= 0.07):
                            squares.append(cnt)
    return squares
def get_chessboard_lines(binary_img):
    edges = cv2.Canny(binary_img, 50, 120)
    cv2.imshow('image', edges)
    k = cv2.waitKey(0) & 0xFF
    if k == 27:
        cv2.destroyAllWindows()
    lines_data = cv2.HoughLines(edges, 1, np.pi / 180, 110)
    parallel_lines = []
    vertical_lines = []
    for rho, theta in lines_data[0]:
        #print 'rho: ' + str(rho) + ' theta: ' + str(theta)
        if 2 > theta > 1:
            vertical_lines.append([theta, rho])
        elif theta < 1:
            parallel_lines.append([theta, rho])
        elif theta > 3:
            parallel_lines.append([theta, rho])
        a = np.cos(theta)
        b = np.sin(theta)
        x0 = a * rho
        y0 = b * rho
        x1 = int(x0 + 1000 * (-b))
        y1 = int(y0 + 1000 * (a))
        x2 = int(x0 - 1000 * (-b))
        y2 = int(y0 - 1000 * (a))
        cv2.line(edges, (x1, y1), (x2, y2), (255, 0, 0), 2)
    cv2.imshow('image', edges)
    k = cv2.waitKey(0) & 0xFF
    if k == 27:
        cv2.destroyAllWindows()
    vertical_lines = sorted(vertical_lines, key=lambda x: abs(x[1]))
    parallel_lines = sorted(parallel_lines, key=lambda x: abs(x[1]))
    return vertical_lines, parallel_lines
def homography(self, img, outdir_name=''):
    orig = img
    # grayscale, blur, then Canny edges
    gray = cv2.cvtColor(orig, cv2.COLOR_BGR2GRAY)
    gauss = cv2.GaussianBlur(gray, (5, 5), 0)
    canny = cv2.Canny(gauss, 50, 150)
    # find contours in the edge image
    contours = cv2.findContours(canny, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)[1]
    # sort by contour area, largest first
    contours.sort(key=cv2.contourArea, reverse=True)
    if len(contours) > 0:
        arclen = cv2.arcLength(contours[0], True)
        # approximate the largest contour with a polygon
        approx = cv2.approxPolyDP(contours[0], 0.01 * arclen, True)
        # warp = approx.copy()
        if len(approx) >= 4:
            self.last_approx = approx.copy()
        else:
            approx = self.last_approx
        rect = self.get_rect_by_points(approx)
        # warped = self.transform_by4(orig, warp[:, 0, :])
        return orig[rect[0]:rect[1], rect[2]:rect[3]]
def find_squares(img, cos_limit=0.1):
    print('search for squares with threshold %f' % cos_limit)
    img = cv2.GaussianBlur(img, (5, 5), 0)
    squares = []
    for gray in cv2.split(img):
        for thrs in range(0, 255, 26):
            if thrs == 0:
                bin = cv2.Canny(gray, 0, 50, apertureSize=5)
                bin = cv2.dilate(bin, None)
            else:
                retval, bin = cv2.threshold(gray, thrs, 255, cv2.THRESH_BINARY)
            bin, contours, hierarchy = cv2.findContours(bin, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
            for cnt in contours:
                cnt_len = cv2.arcLength(cnt, True)
                cnt = cv2.approxPolyDP(cnt, 0.02 * cnt_len, True)
                if len(cnt) == 4 and cv2.contourArea(cnt) > 1000 and cv2.isContourConvex(cnt):
                    cnt = cnt.reshape(-1, 2)
                    max_cos = np.max([angle_cos(cnt[i], cnt[(i + 1) % 4], cnt[(i + 2) % 4]) for i in range(4)])
                    if max_cos < cos_limit:
                        squares.append(cnt)
                    else:
                        #print('dropped a square with max_cos %f' % max_cos)
                        pass
    return squares

###
### Version V2.  Collect meta-data along the way, with commentary added.
###
def find_bibs(image):
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    binary = cv2.GaussianBlur(gray, (5, 5), 0)
    ret, binary = cv2.threshold(binary, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    #binary = cv2.adaptiveThreshold(binary, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
    #ret,binary = cv2.threshold(binary, 190, 255, cv2.THRESH_BINARY)
    #lapl = cv2.Laplacian(image, cv2.CV_64F)
    #gray = cv2.cvtColor(lapl, cv2.COLOR_BGR2GRAY)
    #blurred = cv2.GaussianBlur(lapl, (5, 5), 0)
    #ret,binary = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    #cv2.imwrite("lapl.jpg", lapl)
    edges = cv2.Canny(image, 175, 200)
    cv2.imwrite("edges.jpg", edges)
    binary = edges
    cv2.imwrite("binary.jpg", binary)
    contours, hierarchy = find_contours(binary)
    return get_rectangles(contours)
def diff_rect(img1, img2, pos=None):
    """find contours that include pos in the differences between img1 & img2 (cv2 images)"""
    diff = cv2.absdiff(img1, img2)
    diff = cv2.GaussianBlur(diff, (3, 3), 0)
    edges = cv2.Canny(diff, 100, 200)
    _, thresh = cv2.threshold(edges, 0, 255, cv2.THRESH_BINARY)
    contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    if not contours:
        return None
    contours.sort(key=lambda c: len(c))
    # no pos provided, just return the rect of the largest differing area
    if pos is None:
        cnt = contours[-1]
        x0, y0, w, h = cv2.boundingRect(cnt)
        x1, y1 = x0 + w, y0 + h
        return (x0, y0, x1, y1)
    # else the rect should contain the pos
    x, y = pos
    for i in range(len(contours)):
        cnt = contours[-1 - i]
        x0, y0, w, h = cv2.boundingRect(cnt)
        x1, y1 = x0 + w, y0 + h
        if x0 <= x <= x1 and y0 <= y <= y1:
            return (x0, y0, x1, y1)
def multiple_template_match(self, feature, scene, roi=None, scale=None, min_scale=0.5, max_scale=1.0,
                            max_distance=14, min_corr=0.8, debug=False, threshold_min=50, threshold_max=200):
    if roi is not None:
        scene = scene[roi.top:(roi.top + roi.height), roi.left:(roi.left + roi.width)]
    if not scale:
        scale = self.find_best_scale(feature, scene, min_scale=min_scale, max_scale=max_scale, min_corr=min_corr)
    peaks = []
    if scale:
        scaled_feature = cv2.resize(feature, (0, 0), fx=scale, fy=scale)
        canny_scene = cv2.Canny(scene, threshold_min, threshold_max)
        canny_feature = cv2.Canny(scaled_feature, threshold_min, threshold_max)
        # Threshold for peaks.
        corr_map = cv2.matchTemplate(canny_scene, canny_feature, cv2.TM_CCOEFF_NORMED)
        _, max_corr, _, max_loc = cv2.minMaxLoc(corr_map)
        good_points = list(zip(*np.where(corr_map >= max_corr - self.tolerance)))
        if debug:
            print(max_corr, good_points)
        clusters = self.get_clusters(good_points, max_distance=max_distance)
        peaks = [max([(pt, corr_map[pt]) for pt in cluster], key=lambda pt: pt[1])
                 for cluster in clusters]
    return (scale, peaks)
def applyTransform(self):
    self.framing(self.path)
    self.height, self.width = cv2.imread("Frames/1.jpg").shape[:2]
    # write transformed video
    out = cv2.VideoWriter("changedOutput.mp4", cv2.VideoWriter_fourcc(*'avc1'), 30.0, (self.width, self.height))
    folder = self.sort_files()
    # write transformed video frames
    for i in folder:
        pic = "Frames/" + str(i) + ".jpg"
        Newpic = cv2.imread(pic, 0)
        frame = cv2.Canny(Newpic, 100, 200)
        cv2.imwrite(pic, frame)
        Newpic = cv2.imread(pic)
        img = cv2.flip(Newpic, 0)
        out.write(img)
    out.release()

# Writing output video file
def lineRecognizer(path):
    '''
    :param path: path of the image to analyse
    :returns: lines_data (lines detected by the Hough transform), resize_pic (the image the detection ran on)
    '''
    img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
    resize_pic = img
    #resize_pic = cv2.resize(img, (640, 480), interpolation=cv2.INTER_CUBIC)
    edges = cv2.Canny(resize_pic, 50, 150)
    lines_data = cv2.HoughLines(edges, 1, np.pi / 180, 150)
    return lines_data, resize_pic
def __init__(self, blankBoard, fullBoard, InitialVerticesCount, blackThreshold, FinalVerticesCount=150, Offset=15):
    self.INITIAL_VERTICES_COUNT = InitialVerticesCount
    self.FINAL_VERTICES_COUNT = FinalVerticesCount
    self.OFFSET = Offset
    self.blackThreshold = blackThreshold
    self.blankBoard = cv2.imread(blankBoard)
    self.fullBoard = cv2.imread(fullBoard)
    # get the edges from the images
    self.blankBoardEdges = cv2.Canny(self.blankBoard, 0, 100)
    self.fullBoardEdges = cv2.Canny(self.fullBoard, 0, 100)
    # try to identify the four corners in the blankBoard
    self.detectFourCorners(self.blankBoardEdges)
    # process both the images
    self.blankBoardMatrix = self.process(self.blankBoard, self.blankBoardEdges, self.blankName)
    self.fullBoardMatrix = self.process(self.fullBoard, self.fullBoardEdges, self.fullName)

# function to sharpen the image
def createTrainingData(filename, time_start, time_stop):
    vidcap = cv2.VideoCapture(filename)
    try:
        os.makedirs("trainingdata_" + filename)
    except OSError:
        pass
    os.chdir("trainingdata_" + filename)
    length = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
    fps = int(vidcap.get(cv2.CAP_PROP_FPS))
    for time in range(time_start, time_stop):
        vidcap.set(cv2.CAP_PROP_POS_MSEC, time * 1000)
        success, image = vidcap.read()
        image = cv2.medianBlur(image, 7)
        resized = imutils.resize(image, width=800)
        p1 = resized[370:430, 220:300]
        p2 = resized[370:430, 520:600]
        p1 = cv2.Canny(p1, 400, 100)
        p2 = cv2.Canny(p2, 400, 100)
        cv2.imwrite('p1_' + str(time) + ".png", p1)
        cv2.imwrite('p2_' + str(time) + ".png", p2)
    os.chdir("..")
def filter_image(image, canny1=10, canny2=10, show=False):
    # compute the ratio of the old height to the new height, and resize it
    image = imutils.resize(image, height=scale_factor, interpolation=cv2.INTER_CUBIC)
    # convert the image to grayscale, blur it, and find edges in the image
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (5, 5), 0)
    edged = cv2.Canny(gray, canny1, canny2)
    # show the image(s)
    if show:
        cv2.imshow("Edged", edged)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
    return edged
def filter_image(image, canny1=5, canny2=5, show=False):
    # compute the ratio of the old height to the new height, and resize it
    image = imutils.resize(image, height=scale_factor, interpolation=cv2.INTER_NEAREST)
    # convert the image to grayscale, blur it, and find edges in the image
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (5, 5), 0)
    edged = cv2.Canny(blurred, canny1, canny2)
    # show the image(s)
    if show:
        cv2.imshow("Edged", edged)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
    return edged
def filter_image(image, canny1=10, canny2=10, show=False):
    # compute the ratio of the old height to the new height, and resize it
    image = imutils.resize(image, height=scale_factor, interpolation=cv2.INTER_NEAREST)
    # convert the image to grayscale, blur it, and find edges in the image
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (5, 5), 0)
    edged = cv2.Canny(gray, canny1, canny2)
    # show the image(s)
    if show:
        cv2.imshow("Edged", edged)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
    return edged
def GetImageContour(self):
    thresholdImage = self.__convertImagetoBlackWhite()  # B & W with adaptive threshold
    thresholdImage = cv.Canny(thresholdImage, 100, 200)  # edges by Canny edge detection
    thresholdImage, contours, hierarchy = cv.findContours(
        thresholdImage, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
    self.Contours = contours
    # uncomment this to see the contours on the image
    # cv2.drawContours(thresholdImage, contours, -1, (0,255,0), 3)
    # patternFindingObj = PatternFinding()
    # areas = [cv.contourArea(contour) for contour in contours]
    # for index in xrange(len(contours)):
    #     IsPattern = self.IsPossibleQRContour(index)
    #     if IsPattern is True:
    #         x, y, w, h = cv.boundingRect(contours[index])
    #         cv.rectangle(self.imageOriginal, (x, y), (x+w, y+h), (0, 0, 255), 2)
    #         cv.imshow("hello", self.imageOriginal)
    # maxAreaIndex = np.argmax(areas)
    # x, y, w, h = cv.boundingRect(contours[maxAreaIndex])
    # cv.rectangle(self.image2, (x, y), (x+w, y+h), (0, 255, 0), 2)
    # cv.imshow("hello", self.imageOriginal)
    # cv.waitKey(0)
    # cv.destroyAllWindows()
    contour_group = (thresholdImage, contours, hierarchy)
    return contour_group
def run_step(self, img, thrs1, thrs2, debug):
    self.lines = []
    self.lines2 = []
    height, width, c = img.shape
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    if self.previous_mask_left is None:
        self.previous_mask_left = np.zeros((height, width, 1), np.uint8)
    if self.previous_mask_right is None:
        self.previous_mask_right = np.zeros((height, width, 1), np.uint8)
    self.edge = cv2.Canny(gray, thrs1, thrs2, apertureSize=5)
    self.masked_edges_left, self.current_mask_left, self.previous_mask_left, self.left_line \
        = self.update_edge_mask(self.previous_mask_left, self.left_line, -1, thrs1, thrs2, debug)
    self.masked_edges_right, self.current_mask_right, self.previous_mask_right, self.right_line \
        = self.update_edge_mask(self.previous_mask_right, self.right_line, 1, thrs1, thrs2, debug)
    self.segment_history = self.boxes
    self.boxes = [find_lane_markers(self.masked_edges_left),
                  find_lane_markers(self.masked_edges_right)]
    self.eps = [ep[-2:] + combine_eps(cur, past)[:2]
                for cur, past, ep in zip(self.boxes, self.segment_history, self.eps)]
    self.depth_pairs = [(a, b)
                        for eps in self.eps
                        for ep in eps if len(ep) > 0
                        for a, b in list(zip(ep[0], ep[1]))[:2]]
def findSquare(self, frame):
    image = frame
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (7, 7), 0)
    edged = cv2.Canny(blurred, 60, 60)
    # find contours in the edge map
    (cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # loop over our contours to find hexagon
    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:50]
    screenCnt = None
    for c in cnts:
        # approximate the contour
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.004 * peri, True)
        # if our approximated contour has four points, then
        # we can assume that we have found our square
        if len(approx) >= 4:
            screenCnt = approx
            x, y, w, h = cv2.boundingRect(c)
            cv2.drawContours(image, [approx], -1, (0, 0, 255), 1)
            #cv2.imshow("Screen", image)
    # create the mask and remove the rest of the background
    mask = np.zeros(image.shape[:2], dtype="uint8")
    cv2.drawContours(mask, [screenCnt], -1, 255, -1)
    masked = cv2.bitwise_and(image, image, mask=mask)
    #cv2.imshow("Masked", masked)
    # crop the masked image so it can be compared to the reference image
    cropped = masked[y:y + h, x:x + w]
    # scale the image so it is the same fixed size as the reference image
    cropped = cv2.resize(cropped, (200, 200), interpolation=cv2.INTER_AREA)
    return cropped
def edge_detection(self, image, threshold1, threshold2, aperture):
    '''
    Detects edges in an image using the Canny algorithm.

    Parameters
    ---------
    image : image
    threshold1 : integer
    threshold2 : integer
    aperture : integer

    Returns
    ---------
    image : image
    '''
    return cv2.Canny(image, threshold1, threshold2, apertureSize=aperture)
def canny(im, blur=3):
    im_blur = cv2.blur(im, (blur, blur))
    return cv2.Canny(im_blur, 50, 150, apertureSize=blur)
def _render(self):
    self._smoothed_img = cv2.GaussianBlur(self.image, (self._filter_size, self._filter_size),
                                          sigmaX=0, sigmaY=0)
    self._edge_img = cv2.Canny(self._smoothed_img, self._threshold1, self._threshold2)
    cv2.imshow('smoothed', self._smoothed_img)
    cv2.imshow('edges', self._edge_img)
def canny(img, lowThreshold):
    """
    Performs canny edge detection on the provided grayscale image.
    :param img: a grayscale image
    :param lowThreshold: threshold for the canny operation
    :return: binary image containing the edges found by canny
    """
    dst = np.zeros(img.shape, dtype=img.dtype)
    cv2.blur(img, (3, 3), dst)
    # canny recommends that the high threshold be 3 times the low threshold
    # the kernel size is 3 as defined above
    return cv2.Canny(dst, lowThreshold, lowThreshold * 3, dst, 3)
def camera_callback(self, msg):
    try:
        self.camera_data = self.cv_bridge.imgmsg_to_cv2(msg, "bgr8")
    except cv_bridge.CvBridgeError:
        return
    gray = cv2.cvtColor(self.camera_data, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (5, 5), 0)
    canny = cv2.Canny(blur, 30, 150)
    cv2.imshow("Robot Camera", canny)
    cv2.waitKey(1)
def EdgeDetection(img):
    img = cv2.fastNlMeansDenoising(img, None, 3, 7, 21)
    _, img = cv2.threshold(img, 30, 255, cv2.THRESH_TOZERO)
    denoise_img = img
    laplacian = cv2.Laplacian(img, cv2.CV_64F)
    sobelx = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=5)  # x
    sobely = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=3)  # y
    canny = cv2.Canny(img, 100, 200)
    contour_image, contours, hierarchy = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    return {"denoise": denoise_img, "laplacian": laplacian, "canny": canny,
            "sobely": sobely, "sobelx": sobelx, "contour": contour_image}

# GrayScale Image Convertor
# https://extr3metech.wordpress.com
def __filterRedColor(image_hsv):
    """
    Filters the red color from image_hsv and returns mask.
    """
    mask1 = cv2.inRange(image_hsv, np.array([0, 100, 65]), np.array([10, 255, 255]))
    mask2 = cv2.inRange(image_hsv, np.array([155, 100, 70]), np.array([179, 255, 255]))
    mask = mask1 + mask2
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN,
                            cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2, 2)))
    mask = cv2.Canny(mask, 50, 100)
    mask = cv2.GaussianBlur(mask, (13, 13), 0)
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN,
                            cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2, 2)))
    return mask
def animpingpong(self):
    obj = self.Object
    img = None
    if not obj.imageFromNode:
        img = cv2.imread(obj.imageFile)
    else:
        print("copy image ...")
        img = obj.imageNode.ViewObject.Proxy.img.copy()
        print("copied")
    print(" loaded")
    # print(obj.blockSize, obj.ksize, obj.k)
    # edges = cv2.Canny(img, obj.minVal, obj.maxVal)
    # color = cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB)
    # edges = color
    kernel = np.ones((obj.xsize, obj.ysize), np.uint8)
    opening = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel, iterations=obj.iterations)
    if True:
        print("showing")
        cv2.imshow(obj.Label, opening)
        print("shown")
    else:
        from matplotlib import pyplot as plt
        plt.subplot(121), plt.imshow(img, cmap='gray')
        plt.title('Edge Image'), plt.xticks([]), plt.yticks([])
        plt.subplot(122), plt.imshow(opening, cmap='gray')
        plt.title('Corner Image'), plt.xticks([]), plt.yticks([])
        plt.show()
    print("done")
    self.img = opening
def animpingpong(self):
    obj = self.Object
    img = None
    if not obj.imageFromNode:
        img = cv2.imread(obj.imageFile)
    else:
        print("copy image ...")
        img = obj.imageNode.ViewObject.Proxy.img.copy()
        print("copied")
    print(" loaded")
    # print(obj.blockSize, obj.ksize, obj.k)
    edges = cv2.Canny(img, obj.minVal, obj.maxVal)
    color = cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB)
    edges = color
    if True:
        print("showing")
        cv2.imshow(obj.Label, edges)
        print("shown")
    else:
        from matplotlib import pyplot as plt
        plt.subplot(121), plt.imshow(img, cmap='gray')
        plt.title('Edge Image'), plt.xticks([]), plt.yticks([])
        plt.subplot(122), plt.imshow(edges, cmap='gray')
        plt.title('Corner Image'), plt.xticks([]), plt.yticks([])
        plt.show()
    print("done")
    self.img = edges
def createCV_canny():
    print("create CV canny ...")
    obj = FreeCAD.ActiveDocument.addObject('App::DocumentObjectGroupPython', 'Canny')
    obj.addProperty('App::PropertyFile', 'imageFile', "base").imageFile = '/home/thomas/Bilder/bn_900.png'
    obj.addProperty('App::PropertyLink', 'imageNode', "base")
    obj.addProperty('App::PropertyBool', 'imageFromNode', "base").imageFromNode = False
    obj.addProperty('App::PropertyInteger', 'minVal', "canny").minVal = 100
    obj.addProperty('App::PropertyInteger', 'maxVal', "canny").maxVal = 200
    _CV_canny(obj, '/icons/bounder.png')
    _ViewProviderCV_canny(obj.ViewObject, __dir__ + '/icons/icon1.svg')
    app = MyApp()
    miki2 = miki.Miki()
    miki2.app = app
    app.root = miki2
    app.obj = obj
    obj.ViewObject.Proxy.cmenu.append(["Dialog", lambda: miki2.run(MyApp.s6)])
    obj.ViewObject.Proxy.edit = lambda: miki2.run(MyApp.s6)
    return obj

#
# derived classes
#
def animpingpong(self):
    obj = self.Object
    img = None
    if not obj.imageFromNode:
        img = cv2.imread(obj.imageFile)
    else:
        print("copy image ...")
        img = obj.imageNode.ViewObject.Proxy.img.copy()
        print("copied")
    print(" loaded")
    # print(obj.blockSize, obj.ksize, obj.k)
    # edges = cv2.Canny(img, obj.minVal, obj.maxVal)
    # color = cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB)
    # edges = color
    kernel = np.ones((obj.xsize, obj.ysize), np.uint8)
    closing = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel, iterations=obj.iterations)
    if True:
        print("showing")
        cv2.imshow(obj.Label, closing)
        print("shown")
    else:
        from matplotlib import pyplot as plt
        plt.subplot(121), plt.imshow(img, cmap='gray')
        plt.title('Edge Image'), plt.xticks([]), plt.yticks([])
        plt.subplot(122), plt.imshow(closing, cmap='gray')
        plt.title('Corner Image'), plt.xticks([]), plt.yticks([])
        plt.show()
    print("done")
    self.img = closing
def execute_CannyEdge(proxy, obj):
    '''create Canny Edge image with two parameters'''
    try:
        img = obj.sourceObject.Proxy.img.copy()
    except:
        img = cv2.imread(__dir__ + '/icons/freek.png')
    edges = cv2.Canny(img, obj.minVal, obj.maxVal)
    obj.Proxy.img = cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB)
    say(["Canny Edge image updated", obj.minVal, obj.maxVal])
def canny(img, low_threshold, high_threshold):
    """Applies the Canny transform"""
    return cv2.Canny(img, low_threshold, high_threshold)
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
    """
    `img` should be the output of a Canny transform.

    Returns an image with hough lines drawn.
    """
    lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]),
                            minLineLength=min_line_len, maxLineGap=max_line_gap)
    line_img = np.zeros((*img.shape, 3), dtype=np.uint8)
    draw_lines(line_img, lines)
    return line_img

# Python 3 has support for cool math symbols.
def process_image(image):
    # printing out some stats and plotting
    print('This image is:', type(image), 'with dimensions:', image.shape)
    gray = grayscale(image)

    # Define a kernel size and apply Gaussian smoothing
    kernel_size = 5
    blur_gray = gaussian_blur(gray, kernel_size)
    # plt.imshow(blur_gray, cmap='gray')

    # Define our parameters for Canny and apply
    low_threshold = 45    #50
    high_threshold = 150  #150
    edges = canny(blur_gray, low_threshold, high_threshold)

    # This time we are defining a four sided polygon to mask
    imshape = image.shape
    #vertices = np.array([[(0,imshape[0]),(475, 310), (475, 310), (imshape[1],imshape[0])]], dtype=np.int32)
    vertices = np.array([[(0, imshape[0]), (450, 330), (490, 310), (imshape[1], imshape[0])]], dtype=np.int32)
    masked_edges = region_of_interest(edges, vertices)

    # Define the Hough transform parameters
    # Make a blank the same size as our image to draw on
    rho = 1              # distance resolution in pixels of the Hough grid
    theta = np.pi / 180  # angular resolution in radians of the Hough grid
    threshold = 15       # minimum number of votes (intersections in Hough grid cell)
    min_line_length = 40 # minimum number of pixels making up a line 150 - 40
    max_line_gap = 130   # maximum gap in pixels between connectable line segments 58 - 95
    line_image = np.copy(image) * 0  # creating a blank to draw lines on

    lines = hough_lines(masked_edges, rho, theta, threshold, min_line_length, max_line_gap)

    # Draw the lines on the edge image
    lines_edges = weighted_img(lines, image)
    return lines_edges
def setCallBack(self, update_func):
    self._update_func = update_func

# Tune the Canny edge thresholds with an OpenCV trackbar
def detect_edges(images):
    def blur(image):
        return cv2.GaussianBlur(image, (5, 5), 0)

    def canny_otsu(image):
        scale_factor = 255
        scaled_image = np.uint8(image * scale_factor)
        otsu_threshold = cv2.threshold(
            cv2.cvtColor(scaled_image, cv2.COLOR_RGB2GRAY), 0, 255,
            cv2.THRESH_BINARY + cv2.THRESH_OTSU)[0]
        lower_threshold = max(0, int(otsu_threshold * 0.5))
        upper_threshold = min(255, int(otsu_threshold))
        edges = cv2.Canny(scaled_image, lower_threshold, upper_threshold)
        edges = cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB)
        return np.float32(edges) * (1 / scale_factor)

    blurred = [blur(image) for image in images]
    canny_applied = [canny_otsu(image) for image in blurred]
    return canny_applied
def process_img(img):
    original_image = img
    processed_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    processed_img = cv2.Canny(processed_img, threshold1=200, threshold2=300)
    processed_img = cv2.GaussianBlur(processed_img, (3, 3), 0)
    copy = processed_img
    vertices = np.array([[30, 240], [30, 100], [195, 100], [195, 240]])
    processed_img = roi(processed_img, np.int32([vertices]))
    verticesP = np.array([[30, 270], [30, 230], [197, 230], [197, 270]])
    platform = roi(copy, np.int32([verticesP]))

    # edges
    #lines = cv2.HoughLinesP(platform, 1, np.pi/180, 180, np.array([]), 3, 2)
    #draw_lines(processed_img, lines)
    #draw_lines(original_image, lines)

    # platform lines
    #imgray = cv2.cvtColor(platform, cv2.COLOR_BGR2GRAY)
    ret, thresh = cv2.threshold(platform, 127, 255, 0)
    im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    cv2.drawContours(original_image, contours, -1, (0, 255, 0), 3)
    try:
        platformpos = contours[0][0][0]
    except:
        platformpos = [[0]]
    circles = cv2.HoughCircles(processed_img, cv2.HOUGH_GRADIENT, 1, 20,
                               param1=90, param2=5, minRadius=1, maxRadius=3)
    ballpos = draw_circles(original_image, circles=circles)
    return processed_img, original_image, platform, platformpos, ballpos
def cannyThresholding(self, contour_retrieval_mode=cv2.RETR_LIST):
    '''
    contour_retrieval_mode is passed through as second argument to cv2.findContours
    '''
    # Attempt to match edges found in blue, green or red channels : collect all
    channel = 0
    for gray in cv2.split(self.img):
        channel += 1
        print('channel %d ' % channel)
        title = self.tgen.next('channel-%d' % channel)
        if self.show:
            ImageViewer(gray).show(window=title, destroy=self.destroy, info=self.info, thumbnailfn=title)
        found = {}
        for thrs in range(0, 255, 26):
            print('Using threshold %d' % thrs)
            if thrs == 0:
                print('First step')
                bin = cv2.Canny(gray, 0, 50, apertureSize=5)
                title = self.tgen.next('canny-%d' % channel)
                if self.show:
                    ImageViewer(bin).show(window=title, destroy=self.destroy, info=self.info, thumbnailfn=title)
                bin = cv2.dilate(bin, None)
                title = self.tgen.next('canny-dilate-%d' % channel)
                if self.show:
                    ImageViewer(bin).show(window=title, destroy=self.destroy, info=self.info, thumbnailfn=title)
            else:
                retval, bin = cv2.threshold(gray, thrs, 255, cv2.THRESH_BINARY)
                title = self.tgen.next('channel-%d-threshold-%d' % (channel, thrs))
                if self.show:
                    ImageViewer(bin).show(window='Next threshold (n to continue)', destroy=self.destroy, info=self.info, thumbnailfn=title)
            bin, contours, hierarchy = cv2.findContours(bin, contour_retrieval_mode, cv2.CHAIN_APPROX_SIMPLE)
            title = self.tgen.next('channel-%d-threshold-%d-contours' % (channel, thrs))
            if self.show:
                ImageViewer(bin).show(window=title, destroy=self.destroy, info=self.info, thumbnailfn=title)
            if contour_retrieval_mode == cv2.RETR_LIST or contour_retrieval_mode == cv2.RETR_EXTERNAL:
                filteredContours = contours
            else:
                filteredContours = []
                h = hierarchy[0]
                for component in zip(contours, h):
                    currentContour = component[0]
                    currentHierarchy = component[1]
                    if currentHierarchy[3] < 0:
                        # Found the outermost parent component
                        filteredContours.append(currentContour)
                print('Contours filtered.   Input %d  Output %d' % (len(contours), len(filteredContours)))
                time.sleep(5)
            for cnt in filteredContours:
                cnt_len = cv2.arcLength(cnt, True)
                cnt = cv2.approxPolyDP(cnt, 0.02 * cnt_len, True)
                cnt_len = len(cnt)
                cnt_area = cv2.contourArea(cnt)
                cnt_isConvex = cv2.isContourConvex(cnt)
                if cnt_len == 4 and (cnt_area > self.area_min and cnt_area < self.area_max) and cnt_isConvex:
                    cnt = cnt.reshape(-1, 2)
                    max_cos = np.max([angle_cos(cnt[i], cnt[(i + 1) % 4], cnt[(i + 2) % 4]) for i in range(4)])
                    if max_cos < self.cos_limit:
                        sq = Square(cnt, cnt_area, cnt_isConvex, max_cos)
                        self.squares.append(sq)
                    else:
                        #print('dropped a square with max_cos %f' % max_cos)
                        pass
            found[thrs] = len(self.squares)
            print('Found %d quadrilaterals with threshold %d' % (len(self.squares), thrs))
def detect(img, seed):
    seed = (seed[1], seed[0])
    shape = get_size(img)
    [Ms, Ml, Mshadow] = [np.zeros(shape, dtype=np.uint8) for i in range(3)]
    tmp = np.array(img)
    for i in range(DOWNSAMPLE):
        img_ds = downsample(tmp)
        tmp = img_ds
    seed_ds = (int(seed[0] * RATE**DOWNSAMPLE), int(seed[1] * RATE**DOWNSAMPLE))
    seed_pixel = img_ds[seed_ds]
    shape_ds = (int(shape[0] * RATE**DOWNSAMPLE), int(shape[1] * RATE**DOWNSAMPLE))
    edges = cv2.Canny(cv2.GaussianBlur(img, (3, 3), 0), CANNY[0], CANNY[1])
    seed_mask_ds = np.zeros(get_size(img_ds))
    seed_mask_ds[seed_ds] = 1
    vis = np.zeros(get_size(img_ds))
    directions = ((1, 0), (0, 1), (-1, 0), (0, -1))

    def search(point, seed_pixel):
        if point[0] < 0 or point[1] < 0 or point[0] >= shape_ds[0] or point[1] >= shape_ds[1]:
            return
        if vis[point]:
            return
        elif edges[point]:
            return
        elif dist(img_ds[point], seed_pixel) < COL_SEED:
            vis[point] = 1
            seed_mask_ds[point] = 1
            for i in range(4):
                search((point[0] + directions[i][0], point[1] + directions[i][1]), seed_pixel)

    for i in range(SEED_ITER):
        search(seed_ds, seed_pixel)
        seed_pixel = np.mean(np.mean(img_ds[np.where(seed_mask_ds == 1)], axis=0), axis=0)
    return seed_ds
def grow_seed(img, seed_loc):
    tmp = np.array(img)
    for i in range(DOWNSAMPLE):
        # Downsample the original image
        img_ds = downsample(tmp)
        tmp = img_ds
    seed_loc_ds = (int(seed_loc[0] * RATE**DOWNSAMPLE), int(seed_loc[1] * RATE**DOWNSAMPLE))
    seed_pixel_ds = img_ds[seed_loc_ds]
    edges_ds = cv2.Canny(cv2.GaussianBlur(img_ds, (3, 3), 0), CANNY[0], CANNY[1])
    seed_mask_ds = np.zeros(get_size(img_ds))
    seed_mask_ds[seed_loc_ds] = 1
    visited = np.zeros(get_size(img_ds))

    def search(point, seed_pixel):
        if point[0] < 0 or point[1] < 0 or point[0] >= img_ds.shape[0] or point[1] >= img_ds.shape[1]:
            return
        if visited[point]:
            return
        elif edges_ds[point]:
            return
        elif dist(img_ds[point], seed_pixel) < SEED_TOL:
            visited[point] = 1
            seed_mask_ds[point] = 1
            for i in range(4):
                search((point[0] + search_directions[i][0], point[1] + search_directions[i][1]), seed_pixel)

    for i in range(SEED_ITER):
        search(seed_loc_ds, seed_pixel_ds)
        seed_pixel_ds = np.mean(img_ds[np.where(seed_mask_ds == 1)], axis=0)
        print(seed_pixel_ds)
        visited[:, :] = 0
    for i in range(DOWNSAMPLE):
        seed_mask_ds = upsample(seed_mask_ds)
    seed_mask_ds[np.where(seed_mask_ds > 0.5)] = 1
    seed_mask_ds[np.where(seed_mask_ds <= 0.5)] = 0
    return seed_mask_ds
def find_bib(image):
    width, height, depth = image.shape
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    #gray = cv2.equalizeHist(gray)
    blurred = cv2.GaussianBlur(gray, (5, 5), 0)
    debug_output("find_bib_blurred", blurred)
    #binary = cv2.adaptiveThreshold(blurred, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, blockSize=25, C=0)
    ret, binary = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    #ret,binary = cv2.threshold(blurred, 170, 255, cv2.THRESH_BINARY)
    debug_output("find_bib_binary", binary)
    threshold_contours, hierarchy = find_contours(binary)
    debug_output("find_bib_threshold", binary)
    edges = cv2.Canny(gray, 175, 200, apertureSize=3)
    edge_contours, hierarchy = find_contours(edges)
    debug_output("find_bib_edges", edges)
    contours = threshold_contours + edge_contours
    debug_output_contours("find_bib_threshold_contours", image, contours)
    rectangles = get_rectangles(contours)
    debug_output_contours("find_bib_rectangles", image, rectangles)
    potential_bibs = [rect for rect in rectangles if is_potential_bib(rect, width * height)]
    debug_output_contours("find_bib_potential_bibs", image, potential_bibs)
    ideal_aspect_ratio = 1.0
    potential_bibs = sorted(potential_bibs, key=lambda bib: abs(aspect_ratio(bib) - ideal_aspect_ratio))
    return potential_bibs[0] if len(potential_bibs) > 0 else np.array([[(0, 0)], [(0, 0)], [(0, 0)], [(0, 0)]])

#
# Checks that the size and aspect ratio of the contour is appropriate for a bib.
#
def predict():
    response = requests.get(slide_captcha_url)
    base64_image = response.json()['data']['dataUrl']
    base64_image_without_head = base64_image.replace('data:image/png;base64,', '')
    bytes_io = BytesIO(base64.b64decode(base64_image_without_head))
    img = np.array(Image.open(bytes_io).convert('RGB'))
    img_blur = cv2.GaussianBlur(img, (3, 3), 0)
    img_gray = cv2.cvtColor(img_blur, cv2.COLOR_BGR2GRAY)
    img_canny = cv2.Canny(img_gray, 100, 200)
    operator = get_operator('shape.png')
    (x, y), _ = best_match(img_canny, operator)
    x = x + bias
    print('the position of x is', x)
    buffer = mark(img, x, y)
    return {'value': x, 'image': base64.b64encode(buffer.getbuffer()).decode()}
def getEdges(gray, detector, min_thr=None, max_thr=None):
    """
    Where detector in {1,2,3,4,5}
    1: Laplacian
    2: Sobelx
    3: Sobely
    4: Canny
    5: Sobelx with positive and negative slope (in 2, negative slopes are lost)
    """
    if min_thr is None:
        min_thr = 100
        max_thr = 200
    if detector == 1:
        return cv2.Laplacian(gray, cv2.CV_64F)
    elif detector == 2:
        return cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=-1)
    elif detector == 3:
        return cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=-1)
    elif detector == 4:
        return cv2.Canny(gray, min_thr, max_thr)
        # Canny(min_thresh, max_thresh): the thresholds apply not to the intensity
        # but to the intensity gradient (a value that measures how different a
        # pixel is from its neighbors)
    elif detector == 5:
        sobelx64f = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=5)
        abs_sobel64f = np.absolute(sobelx64f)
        return np.uint8(abs_sobel64f)
def main():
    # parse command line options
    if len(sys.argv) != 2:
        print('Usage: python input_name output_name')
        exit(1)
    filePath = sys.argv[1]
    print("<----- processing %s ----->" % filePath)
    # read the image as grayscale and resize it
    img = cv2.imread(filePath, 0)
    img = cv2.resize(img, (1200, 900))
    # optional mean-based contrast stretch (disabled)
    # imgArr = np.array(img)
    # imgMean = np.mean(img)
    # imgcopy = imgArr - imgMean
    # imgcopy = imgcopy * 2 + imgMean * 3
    # imgcopy = imgcopy / 255
    canny = cv2.Canny(img, 60, 300)
    inverted = cv2.bitwise_not(canny)
    cv2.imshow('Canny', inverted)
    test1 = Image.fromarray(canny)
    test2 = Image.fromarray(inverted)
    result = pytesseract.image_to_string(test1, lang="eng", config="-c tessedit_char_whitelist=0123456789X")
    print(result)
    print("-------")
    result = pytesseract.image_to_string(test2, lang="eng")
    print(result)
    k = cv2.waitKey(0)
def process(img):
    img = cv2.medianBlur(img, 5)
    kernel = np.ones((3, 3), np.uint8)
    #img = cv2.erode(img, kernel, iterations=1)
    sobel = cv2.Sobel(img, cv2.CV_8U, 1, 0, ksize=3)
    element1 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 1))
    element2 = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
    dilation = cv2.dilate(sobel, element2, iterations=1)
    erosion = cv2.erode(dilation, element1, iterations=1)
    dilation2 = cv2.dilate(erosion, element2, iterations=3)
    #img = cv2.dilate(img, kernel, iterations=1)
    #img = cv2.Canny(img, 100, 200)
    return dilation2