We extracted the following 11 code examples from open-source Python projects to illustrate how to use cv2.HoughLines().
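Before the project examples, here is a minimal, self-contained sketch of the usual cv2.HoughLines() call pattern. The file name 'building.jpg' and the Canny/accumulator parameter values are illustrative assumptions, not taken from any of the projects below.

import cv2
import numpy as np

# Illustrative input; any image with strong straight edges works.
img = cv2.imread('building.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 50, 150, apertureSize=3)

# rho resolution = 1 px, theta resolution = 1 degree, accumulator threshold = 200 votes.
lines = cv2.HoughLines(edges, 1, np.pi / 180, 200)

# HoughLines returns None when nothing passes the threshold; otherwise (in OpenCV 3 and
# later) an (N, 1, 2) array of (rho, theta) pairs. Convert each pair to two far-apart
# endpoints so the line can be drawn across the whole image.
if lines is not None:
    for rho, theta in lines[:, 0]:
        a, b = np.cos(theta), np.sin(theta)
        x0, y0 = a * rho, b * rho
        p1 = (int(x0 + 1000 * (-b)), int(y0 + 1000 * a))
        p2 = (int(x0 - 1000 * (-b)), int(y0 - 1000 * a))
        cv2.line(img, p1, p2, (0, 0, 255), 2)
cv2.imwrite('hough_lines.jpg', img)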
def estimate_skew(image):
    edges = auto_canny(image)
    lines = cv2.HoughLines(edges, 1, np.pi / 90, 200)
    new = edges.copy()
    thetas = []
    for line in lines:
        for rho, theta in line:
            a = np.cos(theta)
            b = np.sin(theta)
            x0 = a * rho
            y0 = b * rho
            x1 = int(x0 + 1000 * (-b))
            y1 = int(y0 + 1000 * (a))
            x2 = int(x0 - 1000 * (-b))
            y2 = int(y0 - 1000 * (a))
            # keep only near-horizontal lines (theta within +-30 degrees of 90 degrees)
            if theta > np.pi / 3 and theta < np.pi * 2 / 3:
                thetas.append(theta)
                new = cv2.line(new, (x1, y1), (x2, y2), (255, 255, 255), 1)
    # mean of the kept angles, converted to degrees; 0 if no line qualified
    theta_mean = np.mean(thetas)
    theta = rad_to_deg(theta_mean) if len(thetas) > 0 else 0
    return theta
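The example above depends on two project helpers, auto_canny() and rad_to_deg(), that are not included in the snippet. The following is only a guess at what they might look like (auto_canny is a common median-based Canny wrapper; the sigma value is an assumption):

def auto_canny(image, sigma=0.33):
    # Assumed helper: Canny with thresholds derived from the median intensity.
    v = np.median(image)
    lower = int(max(0, (1.0 - sigma) * v))
    upper = int(min(255, (1.0 + sigma) * v))
    return cv2.Canny(image, lower, upper)

def rad_to_deg(theta):
    # Assumed helper: radians to degrees.
    return theta * 180.0 / np.pi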
def get_chessboard_lines(binary_img):
    edges = cv2.Canny(binary_img, 50, 120)
    cv2.imshow('image', edges)
    k = cv2.waitKey(0) & 0xFF
    if k == 27:
        cv2.destroyAllWindows()
    lines_data = cv2.HoughLines(edges, 1, np.pi/180, 110)
    parallel_lines = []
    vertical_lines = []
    # OpenCV 2.x layout: lines_data[0] holds all (rho, theta) pairs.
    for rho, theta in lines_data[0]:
        # print 'rho: ' + str(rho) + 'theta: ' + str(theta)
        if 2 > theta > 1:
            vertical_lines.append([theta, rho])
        elif theta < 1:
            parallel_lines.append([theta, rho])
        elif theta > 3:
            parallel_lines.append([theta, rho])
        a = np.cos(theta)
        b = np.sin(theta)
        x0 = a*rho
        y0 = b*rho
        x1 = int(x0 + 1000*(-b))
        y1 = int(y0 + 1000*(a))
        x2 = int(x0 - 1000*(-b))
        y2 = int(y0 - 1000*(a))
        cv2.line(edges, (x1, y1), (x2, y2), (255, 0, 0), 2)
    cv2.imshow('image', edges)
    k = cv2.waitKey(0) & 0xFF
    if k == 27:
        cv2.destroyAllWindows()
    vertical_lines = sorted(vertical_lines, key=lambda x: abs(x[1]))
    parallel_lines = sorted(parallel_lines, key=lambda x: abs(x[1]))
    return vertical_lines, parallel_lines
def lineRecognizer(path):
    '''
    :param path: path of the image to analyse
    :returns: lines_data (the lines detected by the Hough transform), resize_pic (the grayscale image used for detection)
    '''
    img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
    resize_pic = img
    # resize_pic = cv2.resize(img, (640, 480), interpolation=cv2.INTER_CUBIC)
    edges = cv2.Canny(resize_pic, 50, 150)
    lines_data = cv2.HoughLines(edges, 1, np.pi/180, 150)
    return lines_data, resize_pic
def find_lines(img, acc_threshold=0.25, should_erode=True):
    if len(img.shape) == 3 and img.shape[2] == 3:  # if it's color
        img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)

    img = cv2.GaussianBlur(img, (11, 11), 0)
    img = cv2.adaptiveThreshold(
        img, 255,
        cv2.ADAPTIVE_THRESH_MEAN_C,
        cv2.THRESH_BINARY, 5, 2)
    img = cv2.bitwise_not(img)
    # thresh = 127
    # edges = cv2.threshold(img, thresh, 255, cv2.THRESH_BINARY)[1]
    # edges = cv2.Canny(blur, 500, 500, apertureSize=3)

    if should_erode:
        element = cv2.getStructuringElement(cv2.MORPH_RECT, (4, 4))
        img = cv2.erode(img, element)

    theta = np.pi/2000
    angle_threshold = 2
    horizontal = cv2.HoughLines(
        img, 1, theta,
        int(acc_threshold * img.shape[1]),
        min_theta=np.radians(90 - angle_threshold),
        max_theta=np.radians(90 + angle_threshold))
    vertical = cv2.HoughLines(
        img, 1, theta,
        int(acc_threshold * img.shape[0]),
        min_theta=np.radians(-angle_threshold),
        max_theta=np.radians(angle_threshold),
    )

    horizontal = list(horizontal) if horizontal is not None else []
    vertical = list(vertical) if vertical is not None else []
    horizontal = [line[0] for line in horizontal]
    vertical = [line[0] for line in vertical]
    horizontal = np.asarray(horizontal)
    vertical = np.asarray(vertical)
    return horizontal, vertical
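A hypothetical way to call find_lines(); the file name 'table_scan.png' is an assumption for illustration:

img = cv2.imread('table_scan.png')
horizontal, vertical = find_lines(img, acc_threshold=0.25, should_erode=True)
# Each row is a (rho, theta) pair; empty arrays mean no candidate passed the vote threshold.
print(len(horizontal), 'horizontal and', len(vertical), 'vertical line candidates')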
def FindSkeleton(self):
    rgb = cv2.cvtColor(self.ImgHSV, cv2.COLOR_HSV2BGR)
    angle = 0
    count = 0
    gray = cv2.cvtColor(cv2.cvtColor(self.ImgHSV, cv2.COLOR_HSV2BGR), cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(gray, 50, 150, apertureSize=3)
    lines = cv2.HoughLines(edges, 1, np.pi/180, 110)
    # print(lines)
    line_count = lines.shape[0]
    for x in range(line_count):
        for rho, theta in lines[x]:
            a = np.cos(theta)
            b = np.sin(theta)
            # print(theta)
            x0 = a*rho
            y0 = b*rho
            x1 = int(x0 + 1000*(-b))
            y1 = int(y0 + 1000*(a))
            x2 = int(x0 - 1000*(-b))
            y2 = int(y0 - 1000*(a))
            crr_angle = np.degrees(b)
            if (crr_angle < 5):
                # print(crr_angle)
                angle = angle + crr_angle
                count = count + 1
                cv2.line(rgb, (x1, y1), (x2, y2), (0, 0, 255), 2)
    angle = angle / count
    self.angle = angle
    return (angle)
def is_grid(self, grid, image):
    """
    Checks the "gridness" by analyzing the results of a hough transform.
    :param grid: binary image
    :return: whether the object in the image might be a grid or not
    """
    #   - Distance resolution = 1 pixel
    #   - Angle resolution = 1° for high line density
    #   - Threshold = 144 hough intersections
    #       8px digit + 3*2px white + 2*1px border = 16px per cell
    #       => 144x144 grid
    #       144 - minimum number of points on the same line
    #       (but due to imperfections in the binarized image it's highly
    #       improbable to detect a 144x144 grid)
    lines = cv2.HoughLines(grid, 1, np.pi / 180, 144)

    if lines is not None and np.size(lines) >= 20:
        lines = lines.reshape((lines.size // 2), 2)
        # theta in [0, pi] (theta > pi => rho < 0)
        # normalise theta in [-pi, pi] and negative rho
        lines[lines[:, 0] < 0, 1] -= np.pi
        lines[lines[:, 0] < 0, 0] *= -1

        criteria = (cv2.TERM_CRITERIA_EPS, 0, 0.01)
        # split lines into 2 groups to check whether they're perpendicular
        if cv2.__version__[0] == '2':
            density, clmap, centers = cv2.kmeans(
                lines[:, 1], 2, criteria, 5, cv2.KMEANS_RANDOM_CENTERS)
        else:
            density, clmap, centers = cv2.kmeans(
                lines[:, 1], 2, None, criteria, 5, cv2.KMEANS_RANDOM_CENTERS)

        if self.debug:
            self.save_hough(lines, clmap)

        # Overall variance from respective centers
        var = density / np.size(clmap)
        sin = abs(np.sin(centers[0] - centers[1]))
        # It is probably a grid only if:
        #   - centroids difference is almost a 90° angle (+-15° limit)
        #   - variance is less than 5° (keeping in mind surface distortions)
        return sin > 0.99 and var <= (5*np.pi / 180) ** 2
    else:
        return False
def is_grid(self, grid, image):
    """
    Checks the "gridness" by analyzing the results of a hough transform.
    :param grid: binary image
    :return: whether the object in the image might be a grid or not
    """
    #   - Distance resolution = 1 pixel
    #   - Angle resolution = 1° for high line density
    #   - Threshold = 144 hough intersections
    #       8px digit + 3*2px white + 2*1px border = 16px per cell
    #       => 144x144 grid
    #       144 - minimum number of points on the same line
    #       (but due to imperfections in the binarized image it's highly
    #       improbable to detect a 144x144 grid)
    lines = cv2.HoughLines(grid, 1, np.pi / 180, 144)

    if lines is not None and np.size(lines) >= 20:
        lines = lines.reshape((lines.size // 2), 2)
        # theta in [0, pi] (theta > pi => rho < 0)
        # normalise theta in [-pi, pi] and negative rho
        lines[lines[:, 0] < 0, 1] -= np.pi
        lines[lines[:, 0] < 0, 0] *= -1

        criteria = (cv2.TERM_CRITERIA_EPS, 0, 0.01)
        # split lines into 2 groups to check whether they're perpendicular
        if cv2.__version__[0] == '2':
            density, clmap, centers = cv2.kmeans(
                lines[:, 1], 2, criteria, 5, cv2.KMEANS_RANDOM_CENTERS)
        else:
            density, clmap, centers = cv2.kmeans(
                lines[:, 1], 2, None, criteria, 5, cv2.KMEANS_RANDOM_CENTERS)

        # Overall variance from respective centers
        var = density / np.size(clmap)
        sin = abs(np.sin(centers[0] - centers[1]))
        # It is probably a grid only if:
        #   - centroids difference is almost a 90° angle (+-15° limit)
        #   - variance is less than 5° (keeping in mind surface distortions)
        return sin > 0.99 and var <= (5*np.pi / 180) ** 2
    else:
        return False
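The perpendicularity test in the two is_grid() examples above hinges on clustering the theta values with cv2.kmeans and comparing the two cluster centers. A standalone sketch of just that step on synthetic angles (the data and the termination criteria here are made up for illustration):

import cv2
import numpy as np

# Two synthetic groups of line angles, around 0 and pi/2 radians, as a grid would produce.
thetas = np.float32(np.concatenate([
    np.random.normal(0.0, 0.01, 50),
    np.random.normal(np.pi / 2, 0.01, 50)])).reshape(-1, 1)

criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.01)
compactness, labels, centers = cv2.kmeans(
    thetas, 2, None, criteria, 5, cv2.KMEANS_RANDOM_CENTERS)

centers = centers.ravel()
var = compactness / len(labels)             # mean squared distance to the assigned center
sin = abs(np.sin(centers[0] - centers[1]))  # close to 1 when the clusters are ~90° apart
print(sin > 0.99 and var <= (5 * np.pi / 180) ** 2)  # True for a grid-like set of angles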
def houghlines(im, h):
    # im = cv2.imread('2.jpg')
    # ret,gray = cv2.threshold(im,40,255,cv2.THRESH_TOZERO_INV)
    # gray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)
    # edges = cv2.Canny(gray,10,200)
    def getKey(item):
        return abs(item[1] - item[3])

    edges = r(im)
    lines = cv2.HoughLines(edges, 20, np.pi/190, 100)
    horizontal = []
    for line in lines:
        for rho, theta in line:
            a = np.cos(theta)
            b = np.sin(theta)
            x0 = a*rho
            y0 = b*rho
            x1 = int(x0 + 1000*(-b))  # Here I have used int() instead of rounding the decimal value, so 3.8 --> 3
            y1 = int(y0 + 1000*(a))   # But if you want to round the number, use np.around(); then 3.8 --> 4.0
            x2 = int(x0 - 1000*(-b))  # But we need integers, so apply int() after that, i.e. int(np.around(x))
            y2 = int(y0 - 1000*(a))
            # cv2.line(im,(x1,y1),(x2,y2),(0,255,0),2)
            # print(str(x1) + " " + str(y1) + " " + str(x2) + " " + str(y2))
            horizontal.append((x1, y1, x2, y2))
    # cv2.imshow('houghlines',im)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()
    horizontal = sorted(horizontal, key=getKey)
    i = 0
    votes = 0
    while True:
        cv2.line(im, (horizontal[i][0], horizontal[i][1]), (horizontal[i][2], horizontal[i][3]), (200, 0, 0), 2)
        average = (horizontal[i][1] + horizontal[i][3]) / 2.0
        percent = average / h
        actual = 100 - (percent * 100)
        if actual > 80:
            i += 1
            print(actual)
        elif actual < 25:
            print(actual)
            votes += 1
            i += 1
        elif actual < 30:
            print("the coffee pot is getting low " + str(actual) + "% full!")
            return votes, actual
        else:
            print("the coffee pot is " + str(actual) + "% full!")
            return votes, actual
def houghlines(im, h):
    # im = cv2.imread('2.jpg')
    # ret,gray = cv2.threshold(im,40,255,cv2.THRESH_TOZERO_INV)
    # gray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)
    # edges = cv2.Canny(gray,10,200)
    def getKey(item):
        return abs(item[1] - item[3])

    edges = r(im)
    lines = cv2.HoughLines(edges, 20, np.pi/190, 100)
    horizontal = []
    for line in lines:
        for rho, theta in line:
            a = np.cos(theta)
            b = np.sin(theta)
            x0 = a*rho
            y0 = b*rho
            x1 = int(x0 + 1000*(-b))  # Here I have used int() instead of rounding the decimal value, so 3.8 --> 3
            y1 = int(y0 + 1000*(a))   # But if you want to round the number, use np.around(); then 3.8 --> 4.0
            x2 = int(x0 - 1000*(-b))  # But we need integers, so apply int() after that, i.e. int(np.around(x))
            y2 = int(y0 - 1000*(a))
            # cv2.line(im,(x1,y1),(x2,y2),(0,255,0),2)
            # print(str(x1) + " " + str(y1) + " " + str(x2) + " " + str(y2))
            horizontal.append((x1, y1, x2, y2))
    # cv2.imshow('houghlines',im)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()
    horizontal = sorted(horizontal, key=getKey)
    i = 0
    while True:
        cv2.line(im, (horizontal[i][0], horizontal[i][1]), (horizontal[i][2], horizontal[i][3]), (200, 0, 0), 2)
        cv2.imshow('houghlines', im)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
        cv2.imwrite("line.jpg", im)
        average = (horizontal[i][1] + horizontal[i][3]) / 2.0
        percent = average / h
        actual = 100 - (percent * 100)
        if actual > 80 or actual < 20:
            i += 1
            print(actual)
        elif actual < 30:
            print("the coffee pot is getting low " + str(actual) + "% full!")
        else:
            print("the coffee pot is " + str(actual) + "% full!")
            onlineCoffee.updateCoffeeSite("The coffee pot is " + str(int(actual)) + "% full!")
            break
def griddect(img, debug=False):
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    edges = cv2.Canny(gray, 50, 150, apertureSize=3)
    lines = cv2.HoughLines(edges, 2, np.pi/100, 320)
    v = []
    h = []
    # OpenCV 2.x layout: lines[0] holds all (rho, theta) pairs.
    for rho, theta in lines[0]:
        a = np.cos(theta)
        b = np.sin(theta)
        x0 = a*rho
        y0 = b*rho
        x1 = int(x0 + 1000*(-b))
        y1 = int(y0 + 1000*(a))
        x2 = int(x0 - 1000*(-b))
        y2 = int(y0 - 1000*(a))
        if int(a) == 0:
            h.append(y1)
        else:
            v.append(x1)
        if debug:
            cv2.line(img, (x1, y1), (x2, y2), (50, 50, 255), 2)

    height, width, channels = img.shape
    DEC = 0

    def dist(numArr):
        for i in range(len(numArr) - 1):
            x = numArr[i + 1] - numArr[i]
            if x > 5:
                yield x

    v_points = np.unique(np.round(np.array(sorted(v)), decimals=DEC))
    v_dist = list(dist(np.sort(v_points)))
    v_point_median = np.median(v_dist)

    h_points = np.unique(np.round(np.array(sorted(h)), decimals=DEC))
    h_dist = list(dist(np.sort(h_points)))
    h_point_median = np.median(h_dist)

    return v_point_median, h_point_median
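A hypothetical call of griddect() on a photo of a board (the file name 'board.png' is an assumption); debug=True draws the detected Hough lines onto the input image:

img = cv2.imread('board.png')
cell_w, cell_h = griddect(img, debug=True)
print('estimated cell spacing:', cell_w, 'x', cell_h)
cv2.imwrite('grid_debug.png', img)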
def get_angle(self, calibration_image):
    """
    :param calibration_image: The HSV-image to use for calculation
    :return: Rotation angle of the field in image
    """
    # TODO: correct return value comment?
    rgb = cv2.cvtColor(calibration_image, cv2.COLOR_HSV2BGR)
    angle = 0
    count = 0
    gray = cv2.cvtColor(cv2.cvtColor(calibration_image, cv2.COLOR_HSV2BGR), cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(gray, 50, 150, apertureSize=3)
    lines = cv2.HoughLines(edges, 1, np.pi/180, 110)

    if lines is not None and lines.shape[0]:
        line_count = lines.shape[0]
    else:
        raise Exception('field not detected')

    for x in range(line_count):
        for rho, theta in lines[x]:
            a = np.cos(theta)
            b = np.sin(theta)
            # print(theta)
            x0 = a * rho
            y0 = b * rho
            x1 = int(x0 + 1000*(-b))
            y1 = int(y0 + 1000*a)
            x2 = int(x0 - 1000*(-b))
            y2 = int(y0 - 1000*a)
            corr_angle = np.degrees(b)
            if corr_angle < 5:
                # print(corr_angle)
                angle = angle + corr_angle
                count = count + 1
                cv2.line(rgb, (x1, y1), (x2, y2), (0, 0, 255), 2)

    print(angle)
    if count:  # avoid division by zero when no line passed the angle filter
        angle = angle / count
        self.angle = angle
        return angle
    else:
        self.angle = 0.1
        return False