The following 35 code examples, extracted from open-source Python projects, illustrate how to use cv2.minAreaRect().
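Before the examples, a minimal sketch may help orient the reader: cv2.minAreaRect() takes a point set (typically a contour) and returns a 3-tuple ((cx, cy), (w, h), angle) describing the minimum-area rotated rectangle, and cv2.boxPoints() expands that tuple into the four corner coordinates. The point values below are made up for illustration:

import cv2
import numpy as np

# Made-up point set standing in for a detected contour.
points = np.array([[10, 10], [60, 25], [55, 70], [5, 55]], dtype=np.float32)

rect = cv2.minAreaRect(points)   # ((cx, cy), (w, h), angle)
(cx, cy), (w, h), angle = rect

box = cv2.boxPoints(rect)        # 4x2 array of corner coordinates
box = np.int0(box)               # round to integer pixels for drawing

canvas = np.zeros((100, 100, 3), np.uint8)
cv2.drawContours(canvas, [box], 0, (0, 255, 0), 2)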
def img_contour_select(ctrs, im):
    # Select candidate rectangle contours
    cand_rect = []
    for item in ctrs:
        epsilon = 0.02*cv2.arcLength(item, True)
        approx = cv2.approxPolyDP(item, epsilon, True)
        if len(approx) <= 8:
            rect = cv2.minAreaRect(item)
            if rect[1][0] < 20 or rect[1][1] < 20:
                continue
            if rect[1][0] > 150 or rect[1][1] > 150:
                continue
            #ratio = (rect[1][1]+0.00001) / rect[1][0]
            #if ratio > 1 or ratio < 0.9:
            #    continue
            box = cv2.boxPoints(rect)
            box_d = np.int0(box)
            cv2.drawContours(im, [box_d], 0, (0,255,0), 3)
            cand_rect.append(box)
    img_show_hook("Candidate rectangles", im)
    return cand_rect
def img_contour_select(ctrs, im):
    # Select candidate rectangle contours
    cand_rect = []
    for item in ctrs:
        epsilon = 0.02*cv2.arcLength(item, True)
        approx = cv2.approxPolyDP(item, epsilon, True)
        if len(approx) <= 8:
            rect = cv2.minAreaRect(item)
            # Discard rectangles tilted between -10 and -80 degrees
            if rect[2] < -10 and rect[2] > -80:
                continue
            if rect[1][0] < 10 or rect[1][1] < 10:
                continue
            #ratio = (rect[1][1]+0.00001) / rect[1][0]
            #if ratio > 1 or ratio < 0.9:
            #    continue
            box = cv2.boxPoints(rect)
            box_d = np.int0(box)
            cv2.drawContours(im, [box_d], 0, (0,255,0), 3)
            cand_rect.append(box)
    img_show_hook("Candidate rectangles", im)
    return cand_rect
def verify_sizes(rectangle):
    # print candidate
    # help(cv2.minAreaRect)
    (x, y), (width, height), rect_angle = rectangle

    # Calculate angle and discard rects that have been rotated more than 15 degrees
    angle = 90 - rect_angle if (width < height) else -rect_angle
    if 15 < abs(angle) < 165:  # 180 degrees is maximum
        return False

    # We make basic validations about the regions detected based on area and aspect ratio.
    # We only consider that a region can be a plate if the aspect ratio is approximately
    # 520/110 = 4.727272 (plate width divided by plate height) with an error margin of
    # 40 percent, and an area based on a minimum of 15 pixels and a maximum of 125 pixels
    # for the height of the plate. These values depend on the image sizes and camera position:
    area = height * width
    if height == 0 or width == 0:
        return False
    if not satisfy_ratio(area, width, height):
        return False
    return True
def remove_border(contour, ary):
    """Remove everything outside a border contour."""
    # Use a rotated rectangle (should be a good approximation of a border).
    # If it's far from a right angle, it's probably two sides of a border and
    # we should use the bounding box instead.
    c_im = np.zeros(ary.shape)
    r = cv2.minAreaRect(contour)
    degs = r[2]
    if angle_from_right(degs) <= 10.0:
        box = cv2.boxPoints(r)
        box = np.int0(box)
        cv2.drawContours(c_im, [box], 0, 255, -1)
        cv2.drawContours(c_im, [box], 0, 0, 4)
    else:
        x1, y1, x2, y2 = cv2.boundingRect(contour)
        cv2.rectangle(c_im, (x1, y1), (x2, y2), 255, -1)
        cv2.rectangle(c_im, (x1, y1), (x2, y2), 0, 4)
    return np.minimum(c_im, ary)
def _bounding_box_of(contour):
    rotbox = cv2.minAreaRect(contour)
    coords = cv2.boxPoints(rotbox)
    xrank = np.argsort(coords[:, 0])
    left = coords[xrank[:2], :]
    yrank = np.argsort(left[:, 1])
    left = left[yrank, :]
    right = coords[xrank[2:], :]
    yrank = np.argsort(right[:, 1])
    right = right[yrank, :]
    # top-left, top-right, bottom-right, bottom-left
    box_coords = tuple(left[0]), tuple(right[0]), tuple(right[1]), tuple(left[1])
    box_dims = rotbox[1]
    box_centroid = int((left[0][0] + right[1][0]) / 2.0), int((left[0][1] + right[1][1]) / 2.0)
    return box_coords, box_dims, box_centroid
def remove_border(contour, ary):
    """Remove everything outside a border contour."""
    # Use a rotated rectangle (should be a good approximation of a border).
    # If it's far from a right angle, it's probably two sides of a border and
    # we should use the bounding box instead.
    c_im = np.zeros(ary.shape)
    r = cv2.minAreaRect(contour)
    degs = r[2]
    if angle_from_right(degs) <= 10.0:
        box = cv2.cv.BoxPoints(r)
        box = np.int0(box)
        cv2.drawContours(c_im, [box], 0, 255, -1)
        cv2.drawContours(c_im, [box], 0, 0, 4)
    else:
        x1, y1, x2, y2 = cv2.boundingRect(contour)
        cv2.rectangle(c_im, (x1, y1), (x2, y2), 255, -1)
        cv2.rectangle(c_im, (x1, y1), (x2, y2), 0, 4)
    return np.minimum(c_im, ary)
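The two remove_border variants above differ only in how the corner points are obtained: OpenCV 2.x exposed the call as cv2.cv.BoxPoints, while OpenCV 3.x and later renamed it to cv2.boxPoints, and both spellings appear throughout the examples below. A small version-agnostic wrapper, as a sketch (the helper name box_points is ours, not from any of the quoted projects):

import cv2

def box_points(rect):
    # Return the 4 corners of a rotated rect on any OpenCV version.
    if hasattr(cv2, 'boxPoints'):
        return cv2.boxPoints(rect)   # OpenCV 3.x and later
    return cv2.cv.BoxPoints(rect)    # OpenCV 2.x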
def _estimate_current_anticlockwise_degrees_using_minarearect(self, spot_xy) -> float:
    # Find the minimum area rectangle around the number
    nearby_contour_groups = contour_tools.extract_contour_groups_close_to(
        self.contour_groups, target_point_xy=spot_xy,
        delta=self._min_pixels_between_contour_groups)
    nearby_contours = [c for grp in nearby_contour_groups for c in grp]
    box = cv2.minAreaRect(np.row_stack(nearby_contours))
    corners_xy = cv2.boxPoints(box).astype(np.int32)
    self._log_contours_on_current_image([corners_xy], name="Minimum area rectangle")

    # Construct a vector which, once correctly rotated, goes from the
    # bottom right corner up & left at 135 degrees
    sorted_corners = sorted(corners_xy, key=lambda pt: np.linalg.norm(spot_xy - pt))
    bottom_right_corner = sorted_corners[0]  # The closest corner to the spot
    adjacent_corners = sorted_corners[1:3]   # The next two closest corners
    unit_vectors_along_box_edge = misc.normalised(adjacent_corners - bottom_right_corner)
    up_left_diagonal = unit_vectors_along_box_edge.sum(axis=0)
    degrees_of_up_left_diagonal = np.rad2deg(np.arctan2(-up_left_diagonal[1], up_left_diagonal[0]))
    return degrees_of_up_left_diagonal - 135
def min_area_rect(xs, ys):
    """
    Args:
        xs: numpy ndarray with shape=(N,4). N is the number of oriented bboxes.
            4 contains [x1, x2, x3, x4]
        ys: numpy ndarray with shape=(N,4), [y1, y2, y3, y4]
        Note that [(x1, y1), (x2, y2), (x3, y3), (x4, y4)] can represent an
        oriented bbox.
    Return:
        the oriented rects surrounding the box, in the format: [cx, cy, w, h, theta].
    """
    xs = np.asarray(xs, dtype=np.float32)
    ys = np.asarray(ys, dtype=np.float32)

    num_rects = xs.shape[0]
    box = np.empty((num_rects, 5))  # cx, cy, w, h, theta
    for idx in xrange(num_rects):
        points = zip(xs[idx, :], ys[idx, :])
        cnt = util.img.points_to_contour(points)
        rect = cv2.minAreaRect(cnt)
        cx, cy = rect[0]
        w, h = rect[1]
        theta = rect[2]
        box[idx, :] = [cx, cy, w, h, theta]

    box = np.asarray(box, dtype=xs.dtype)
    return box
def shapeFiltering(img):
    contours = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[0]
    if len(contours) == 0:
        return "yoopsie"
    #else:
        #print contours
    """blank_image = np.zeros((img.shape[0],img.shape[1],3), np.uint8)
    cv2.drawContours(blank_image, contours, -1, (255, 255, 255))
    cv2.imshow("imagiae", blank_image)
    cv2.waitKey()"""
    good_shape = []
    for c in contours:
        x,y,w,h = cv2.boundingRect(c)
        """rect = cv2.minAreaRect(contour)
        box = cv2.boxPoints(rect)
        box = np.int0(box)
        w = """
        #if h == 0:
        #    continue
        ratio = w / h
        ratio_grade = ratio / (TMw / TMh)
        if 0.2 < ratio_grade < 1.8:
            good_shape.append(c)
    """blank_image = np.zeros((img.shape[0],img.shape[1],3), np.uint8)
    cv2.drawContours(blank_image, good_shape, -1, (255, 255, 255))
    cv2.imshow("imagia", blank_image)
    cv2.waitKey()"""
    return good_shape
def findCorners(contour): """blank_image = np.zeros((img.shape[0],img.shape[1],3), np.uint8) cv2.drawContours(blank_image, contour, -1, (255, 255, 255)) rows,cols = img.shape[0], img.shape[1] M = cv2.getRotationMatrix2D((cols/2,rows/2),-45,0.5) dst = cv2.warpAffine(blank_image,M,(cols,rows)) cv2.imshow("rotatio", dst) cv2.waitKey()""" rect = cv2.minAreaRect(contour) box = cv2.boxPoints(rect) box = np.int0(box) height_px_1 = box[0][1] - box[3][1] height_px_2 = box[1][1] - box[2][1] print height_px_1, height_px_2 if height_px_1 < height_px_2: close_height_px = height_px_2 far_height_px = height_px_1 else: close_height_px = height_px_1 far_height_px = height_px_2 return close_height_px, far_height_px
def findCorners(contour):
    rect = cv2.minAreaRect(contour)
    box = cv2.boxPoints(rect)
    box = numpy.int0(box)
    height_px_1 = box[0][1] - box[3][1]
    height_px_2 = box[1][1] - box[2][1]
    print height_px_1, height_px_2
    if height_px_1 < height_px_2:
        close_height_px = height_px_2
        far_height_px = height_px_1
    else:
        close_height_px = height_px_1
        far_height_px = height_px_2
    return close_height_px, far_height_px
def aspect_ratio_score(contour):
    rect = cv2.minAreaRect(contour)
    width = rect[1][0]
    height = rect[1][1]

    ratio_score = 0.0

    # check to make sure the size is defined to prevent a possible division by 0 error
    if width != 0 and height != 0:
        # the target is 1ft8in wide by 1ft2in high, so the ratio of width/height is 20/14
        ratio_score = 100 - abs((width / height) - (20 / 14))

    return ratio_score
def aspect_ratio(rect):
    (x,y),(w,h),theta = cv2.minAreaRect(rect)
    return float(w) / float(h)
def get_image_parameters(self, image=None, contour=None, final=False):
    '''
    updates angle of image, and centre using cv2 or PIL.
    NOTE: this assumes the floorplan is rectangular! If you live in a
    lighthouse, the angle will not be valid!
    input is a cv2 contour or PIL image
    routines find the minimum area rectangle that fits the image outline
    '''
    if contour is not None and HAVE_CV2:
        # find minimum area rectangle that fits
        # returns (x,y), (width, height), theta - where (x,y) is the center
        x_y, l_w, angle = cv2.minAreaRect(contour)
    elif image is not None and HAVE_PIL:
        x_y, angle = self.PIL_get_image_parameters(image)
    else:
        return

    if angle < self.angle - 45:
        angle += 90
    if angle > 45 - self.angle:
        angle -= 90

    if final:
        self.cx = x_y[0]
        self.cy = x_y[1]
        self.angle = angle
    self.log.info("MAP: image center: x:%d, y:%d, angle %.2f" % (x_y[0], x_y[1], angle))
def filter(seg, area, label):
    """
    Apply the filter. The final list is ranked by area.
    """
    good = label[area > TextRegions.minArea]
    area = area[area > TextRegions.minArea]
    filt, R = [], []
    for idx, i in enumerate(good):
        mask = seg == i
        xs, ys = np.where(mask)

        coords = np.c_[xs, ys].astype('float32')
        rect = cv2.minAreaRect(coords)
        box = np.array(cv2.cv.BoxPoints(rect))
        h, w, rot = TextRegions.get_hw(box, return_rot=True)

        f = (h > TextRegions.minHeight
             and w > TextRegions.minWidth
             and TextRegions.minAspect < w / h < TextRegions.maxAspect
             and area[idx] / w * h > TextRegions.pArea)
        filt.append(f)
        R.append(rot)

    # filter bad regions:
    filt = np.array(filt)
    area = area[filt]
    R = [R[i] for i in xrange(len(R)) if filt[i]]

    # sort the regions based on areas:
    aidx = np.argsort(-area)
    good = good[filt][aidx]
    R = [R[i] for i in aidx]

    filter_info = {'label': good, 'rot': R, 'area': area[aidx]}
    return filter_info
def char2wordBB(self, charBB, text):
    """
    Converts character bounding-boxes to word-level bounding-boxes.

    charBB : 2x4xn matrix of BB coordinates
    text   : the text string

    output : 2x4xm matrix of BB coordinates, where m == number of words.
    """
    wrds = text.split()
    bb_idx = np.r_[0, np.cumsum([len(w) for w in wrds])]
    wordBB = np.zeros((2,4,len(wrds)), 'float32')

    for i in xrange(len(wrds)):
        cc = charBB[:,:,bb_idx[i]:bb_idx[i+1]]

        # fit a rotated-rectangle:
        # change shape from 2x4xn_i -> (4*n_i)x2
        cc = np.squeeze(np.concatenate(np.dsplit(cc,cc.shape[-1]),axis=1)).T.astype('float32')
        rect = cv2.minAreaRect(cc.copy())
        box = np.array(cv2.cv.BoxPoints(rect))

        # find the permutation of box-coordinates which
        # are "aligned" appropriately with the character-bb.
        # (exhaustive search over all possible assignments):
        cc_tblr = np.c_[cc[0,:], cc[-3,:], cc[-2,:], cc[3,:]].T
        perm4 = np.array(list(itertools.permutations(np.arange(4))))
        dists = []
        for pidx in xrange(perm4.shape[0]):
            d = np.sum(np.linalg.norm(box[perm4[pidx],:]-cc_tblr,axis=1))
            dists.append(d)
        wordBB[:,:,i] = box[perm4[np.argmin(dists)],:].T

    return wordBB
def get_bounding_rect(contour):
    rect = cv2.minAreaRect(contour)
    box = cv2.boxPoints(rect)
    return np.int0(box)
def generate_crack(img, label_img, num, ratio_area, aspect, length_ratio):
    ratio_area_l, ratio_area_h = ratio_area
    length_ratio_l, length_ratio_h = length_ratio
    area_l = img.shape[0]*img.shape[1]*ratio_area_l
    area_h = img.shape[0]*img.shape[1]*ratio_area_h
    length_l = min(img.shape[0], img.shape[1]) * length_ratio_l
    length_h = min(img.shape[0], img.shape[1]) * length_ratio_h
    count = 0
    while True:
        rect_list = random_rect(1, img.shape, (0,1), (0,1))
        handle_img = np.copy(img)
        rect = rect_list[0]
        x, y, h, w = rect
        # roi = handle_img[y:y+h,x:x+w]
        handle_img[y:y+h, x:x+w] = 0
        pixelpoints, hull = convex_hull_generate(img, rect, 10)
        w_, h_ = cv2.minAreaRect(hull)[1]
        # print w_,h_
        if w_ != 0 and h_ != 0:
            if (cv2.contourArea(hull) > area_l and cv2.contourArea(hull) < area_h) and \
               (w_/h_ > aspect or h_/w_ > aspect) and max(w_, h_) < length_h and min(w_, h_) > length_l:
                img[pixelpoints] = handle_img[pixelpoints]
                label_img[pixelpoints] = 255
                count += 1
        if count >= num:
            break
def generate_scratch(img, label_img, num, ratio_area, aspect, length_ratio):
    ratio_area_l, ratio_area_h = ratio_area
    length_ratio_l, length_ratio_h = length_ratio
    area_l = img.shape[0]*img.shape[1]*ratio_area_l
    area_h = img.shape[0]*img.shape[1]*ratio_area_h
    length_l = min(img.shape[0], img.shape[1]) * length_ratio_l
    length_h = min(img.shape[0], img.shape[1]) * length_ratio_h
    count = 0
    while True:
        rect_list = random_rect(1, img.shape, (0,1), (0,1))
        handle_img = np.copy(img)
        rect = rect_list[0]
        x, y, h, w = rect
        # roi = handle_img[y:y+h,x:x+w]
        handle_img[y:y+h, x:x+w] = 50
        pixelpoints, hull = convex_hull_generate(img, rect, 10)
        w_, h_ = cv2.minAreaRect(hull)[1]
        # print w_,h_,length_h,length_l,cv2.contourArea(hull),area_h,area_l
        if w_ != 0 and h_ != 0:
            if (cv2.contourArea(hull) > area_l and cv2.contourArea(hull) < area_h) and \
               (w_/h_ > aspect or h_/w_ > aspect) and max(w_, h_) < length_h and min(w_, h_) > length_l:
                img[pixelpoints] = handle_img[pixelpoints]
                label_img[pixelpoints] = 255
                count += 1
        if count >= num:
            break
def generate_spot(img, label_img, num, ratio_area, aspect):
    ratio_area_l, ratio_area_h = ratio_area
    area_l = img.shape[0]*img.shape[1]*ratio_area_l
    area_h = img.shape[0]*img.shape[1]*ratio_area_h
    count = 0
    while True:
        rect_list = random_rect(1, img.shape, (0,1), (0,1))
        handle_img = np.copy(img)
        rect = rect_list[0]
        x, y, h, w = rect
        # roi = handle_img[y:y+h,x:x+w]
        handle_img[y:y+h, x:x+w] = 255
        pixelpoints, hull = convex_hull_generate(img, rect, 10)
        w_, h_ = cv2.minAreaRect(hull)[1]
        if w_ != 0 and h_ != 0:
            if (cv2.contourArea(hull) > area_l and cv2.contourArea(hull) < area_h) and \
               max(w_/h_, h_/w_) < aspect:  # and max(w_,h_) > length:
                img[pixelpoints] = handle_img[pixelpoints]
                label_img[pixelpoints] = 255
                count += 1
        if count >= num:
            break
def verifySize(self, minAreaRect):
    # minAreaRect is (center (x,y), (width, height), angle of rotation)
    area = minAreaRect[1][0] * minAreaRect[1][1]
    if area > 6000 and area < 15000:
        return True
    return False
def calculateFrame(self, cap):
    data = self.getDataPoints()
    #targetCascade = cv2.CascadeClassifier(cascPath)
    frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    lower_bound = np.array([float(data['HMIN']), float(data["SMIN"]), float(data['VMIN'])])
    upper_bound = np.array([float(data['HMAX']), float(data["SMAX"]), float(data['VMAX'])])
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, lower_bound, upper_bound)
    largest_area = 0
    xCenter = -1
    yCenter = -1
    targetRect = None
    ret, thresh = cv2.threshold(mask, 200, 255, 0)
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    if len(contours) > 1:
        areas = [cv2.contourArea(c) for c in contours]
        max_index = np.argmax(areas)
        cnt = contours[max_index]
        rect = cv2.minAreaRect(cnt)
        box = cv2.cv.BoxPoints(rect)
        box = np.int0(box)
        xCenter = (box[0][0] + box[1][0] + box[2][0] + box[3][0]) / 4
        yCenter = (box[0][1] + box[1][1] + box[2][1] + box[3][1]) / 4
        cv2.drawContours(frame, [box], 0, (0,255,0), 2)
    output = {}
    distance = 0.0025396523 * yCenter**2 + 0.1000098497 * yCenter + 46.8824851568
    theta = math.atan2(xCenter - 160, distance)
    output_dict = {"xCenter": xCenter, "yCenter": yCenter, "theta": theta, "distance": distance}
    output = json.dumps(output_dict)
    return frame, output, True, mask
def getTargetBox(target):
    minRect = cv2.minAreaRect(target)
    box = cv2.cv.BoxPoints(minRect)
    #box = np.int0(box)  # convert points to ints
    return box
def validate_contour(contour, img, aspect_ratio_range, area_range):
    rect = cv2.minAreaRect(contour)
    img_width = img.shape[1]
    img_height = img.shape[0]
    box = cv2.boxPoints(rect)
    box = np.int0(box)

    X = rect[0][0]
    Y = rect[0][1]
    angle = rect[2]
    width = rect[1][0]
    height = rect[1][1]

    angle = (angle + 180) if width < height else (angle + 90)

    output = False

    if (width > 0 and height > 0) and ((width < img_width/2.0) and (height < img_width/2.0)):
        aspect_ratio = float(width)/height if width > height else float(height)/width
        if (aspect_ratio >= aspect_ratio_range[0] and aspect_ratio <= aspect_ratio_range[1]):
            if ((height*width > area_range[0]) and (height*width < area_range[1])):
                box_copy = list(box)
                point = box_copy[0]
                del(box_copy[0])
                dists = [((p[0]-point[0])**2 + (p[1]-point[1])**2) for p in box_copy]
                sorted_dists = sorted(dists)
                opposite_point = box_copy[dists.index(sorted_dists[1])]
                tmp_angle = 90

                if abs(point[0]-opposite_point[0]) > 0:
                    tmp_angle = abs(float(point[1]-opposite_point[1]))/abs(point[0]-opposite_point[0])
                    tmp_angle = rad_to_deg(math.atan(tmp_angle))

                if tmp_angle <= 45:
                    output = True
    return output
def _find_a_thing(self, c, min_height, max_height, min_width, max_width, max_distance, debug_img=None):
    rect = cv2.minAreaRect(c)
    box = cv2.cv.BoxPoints(rect) if is_cv2() else cv2.boxPoints(rect)

    top, bottom, left, right, center = self.find_dimensions(np.int0(np.array(box)))

    if top is None or left is None or center is None:
        return None

    vertical = self.find_distance(top, bottom)
    horizontal = self.find_distance(left, right)
    away = self.find_distance(center, None)

    if vertical > horizontal:
        height = vertical
        width = horizontal
        flipped = False
    else:
        height = horizontal
        width = vertical
        flipped = True

    if height < min_height or height > max_height:
        return None
    if width < min_width or width > max_width:
        return None
    if away > max_distance:
        return None

    # This page was helpful in understanding angle
    # https://namkeenman.wordpress.com/2015/12/18/open-cv-determine-angle-of-rotatedrect-minarearect/
    angle = rect[2]
    if rect[1][0] < rect[1][1]:
        angle -= 90.0

    if debug_img is not None:
        x, y, w, h = cv2.boundingRect(c)
        cv2.drawContours(debug_img, [c], -1, (0, 255, 0), 2)
        cv2.drawContours(debug_img, [np.int0(np.array(box))], -1, (0, 0, 255), 2)
        cv2.rectangle(debug_img, (x, y), (x+w, y+h), (255, 0, 0), 2)
        cv2.circle(debug_img, top, 5, (255, 255, 0))
        cv2.circle(debug_img, bottom, 5, (255, 255, 0))
        cv2.circle(debug_img, left, 5, (255, 255, 0))
        cv2.circle(debug_img, right, 5, (255, 255, 0))
        cv2.circle(debug_img, center, 5, (255, 255, 0))

    return Thing(height, width, center, angle)
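The angle fix-up above (and the blog post it links) assume the pre-4.5 OpenCV convention, in which minAreaRect reports an angle in [-90, 0); OpenCV 4.5 and later report an angle in (0, 90] instead, so angle-based logic like this needs a guard on newer builds. A hedged normalisation sketch (contour stands for any point set; verify the convention on your own OpenCV build):

import cv2

rect = cv2.minAreaRect(contour)   # ((cx, cy), (w, h), angle)
(cx, cy), (w, h), angle = rect
if angle > 0:
    # OpenCV >= 4.5: the same physical rectangle reads as the legacy
    # rect with width/height swapped and 90 degrees subtracted.
    w, h = h, w
    angle -= 90.0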
def img_tesseract_detect(c_rect, im):
    # The angle returned by minAreaRect is in the range -90~0, so the box
    # points alone do not tell which side is the top of the text.
    # Order the four corners explicitly and rectify with a perspective transform.
    pts = c_rect.reshape(4, 2)
    rect = np.zeros((4, 2), dtype="float32")

    # the top-left point has the smallest sum whereas the
    # bottom-right has the largest sum
    s = pts.sum(axis=1)
    rect[0] = pts[np.argmin(s)]
    rect[3] = pts[np.argmax(s)]

    # compute the difference between the points -- the top-right
    # will have the minimum difference and the bottom-left will
    # have the maximum difference
    diff = np.diff(pts, axis=1)
    rect[2] = pts[np.argmin(diff)]
    rect[1] = pts[np.argmax(diff)]

    dst = np.float32([[0,0], [0,100], [200,0], [200,100]])

    M = cv2.getPerspectiveTransform(rect, dst)
    warp = cv2.warpPerspective(im, M, (200, 100))

    img_show_hook("Rectified region", warp)

    warp = np.array(warp, dtype=np.uint8)
    radius = 10
    selem = disk(radius)

    # Local Otsu thresholding with a disk-shaped neighborhood
    local_otsu = rank.otsu(warp, selem)
    l_otsu = np.uint8(warp >= local_otsu)
    l_otsu *= 255

    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (4, 4))
    l_otsu = cv2.morphologyEx(l_otsu, cv2.MORPH_CLOSE, kernel)

    img_show_hook("Local Otsu threshold", l_otsu)

    print("Recognition result:")
    print(pytesseract.image_to_string(Image.fromarray(l_otsu)))

    cv2.waitKey(0)
    return
def img_tesseract_detect(c_rect, im):
    # The angle returned by minAreaRect is in the range -90~0, so the box
    # points alone do not tell which side is the top of the text.
    # Order the four corners explicitly and rectify with a perspective transform.
    pts = c_rect.reshape(4, 2)
    rect = np.zeros((4, 2), dtype="float32")

    # the top-left point has the smallest sum whereas the
    # bottom-right has the largest sum
    s = pts.sum(axis=1)
    rect[0] = pts[np.argmin(s)]
    rect[3] = pts[np.argmax(s)]

    # compute the difference between the points -- the top-right
    # will have the minimum difference and the bottom-left will
    # have the maximum difference
    diff = np.diff(pts, axis=1)
    rect[2] = pts[np.argmin(diff)]
    rect[1] = pts[np.argmax(diff)]

    width = rect[3][0] - rect[0][0]
    height = rect[3][1] - rect[0][1]
    width = (int)((50.0 / height) * width)
    height = 50
    dst = np.float32([[0,0], [0,height], [width,0], [width,height]])

    M = cv2.getPerspectiveTransform(rect, dst)
    warp = cv2.warpPerspective(im, M, (width, height))

    img_show_hook("Rectified region", warp)

    warp = np.array(warp, dtype=np.uint8)
    radius = 13
    selem = disk(radius)

    # Local Otsu thresholding with a disk-shaped neighborhood
    local_otsu = rank.otsu(warp, selem)
    l_otsu = np.uint8(warp >= local_otsu)
    l_otsu *= 255

    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (2, 2))
    l_otsu = cv2.morphologyEx(l_otsu, cv2.MORPH_CLOSE, kernel)

    img_show_hook("Local Otsu threshold", l_otsu)

    print("Recognition result:")
    print(pytesseract.image_to_string(Image.fromarray(l_otsu), lang="chi-sim"))

    cv2.waitKey(0)
    return
def get_contours(orig_image):
    """
    Get edge points (hopefully corners) from the given opencv image
    (called contours in opencv)

    Parameters:
        :param: `orig_image` - the thresholded image from which to find contours
    """
    new_image = numpy.copy(orig_image)
    # cv2.imshow("Vision", new_image)
    # cv2.waitKey(1000)
    new_image, contours, hierarchy = cv2.findContours(new_image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # print(len(contours))
    # print(len(contours[0]))
    # print(len(contours[0][0]))
    # print(len(contours[0][0][0]))
    largest_contour = 0
    most_matching = 0
    min_score = 0
    max_area = 0
    if len(contours) > 1:
        print("Length of contours:", len(contours))
        max_area = cv2.contourArea(contours[0])
        min_score = average_goal_matching(contours[0])
        for i in range(1, len(contours)):
            # print(contours[i])
            current_score = average_goal_matching(contours[i])
            current_area = cv2.contourArea(contours[i])
            if current_area > max_area:
                max_area = current_area
                largest_contour = i
            if current_score < min_score and current_score != 0 and current_area > 300 and current_area < 1500:
                min_score = current_score
                most_matching = i
    elif len(contours) == 0:
        raise GoalNotFoundException("Goal not found!")
    if min_score >= 9999999999999999:
        raise GoalNotFoundException("Goal not found!")
    print("largest_contour:", largest_contour)
    print("Area:", max_area)
    # print("largest_contour:", largest_contour)
    print("Most matching:", most_matching)
    print("Score:", min_score)
    print("Area of most matching:", cv2.contourArea(contours[most_matching]))
    rect = cv2.minAreaRect(contours[most_matching])
    box = cv2.boxPoints(rect)
    box = numpy.int0(box)
    # print(box)
    return numpy.array(contours[most_matching]), box
def image_callback(self, msg):
    # convert ROS image to OpenCV image
    try:
        image = self.bridge.imgmsg_to_cv2(msg, desired_encoding='bgr8')
    except CvBridgeError as e:
        print(e)

    # create hsv image of scene
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)

    # find pink objects in the image
    lower_pink = numpy.array([139, 0, 240], numpy.uint8)
    upper_pink = numpy.array([159, 121, 255], numpy.uint8)
    mask = cv2.inRange(hsv, lower_pink, upper_pink)

    # dilate and erode with kernel size 11x11
    cv2.morphologyEx(mask, cv2.MORPH_CLOSE, numpy.ones((11,11)))

    # find all of the contours in the mask image
    contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    self.contourLength = len(contours)

    # Check for at least one target found
    if self.contourLength < 1:
        print "No target found"
    else:  # target found
        # Loop through all of the contours, and get their areas
        area = [0.0]*len(contours)
        for i in range(self.contourLength):
            area[i] = cv2.contourArea(contours[i])

        #### Target #### the largest "pink" object
        target_image = contours[area.index(max(area))]

        # Using moments find the center of the object and draw a red outline around the object
        target_m = cv2.moments(target_image)
        self.target_u = int(target_m['m10']/target_m['m00'])
        self.target_v = int(target_m['m01']/target_m['m00'])
        points = cv2.minAreaRect(target_image)
        box = cv2.cv.BoxPoints(points)
        box = numpy.int0(box)
        cv2.drawContours(image, [box], 0, (0, 0, 255), 2)
        rospy.loginfo("Center of target is x at %d and y at %d", int(self.target_u), int(self.target_v))

        self.target_found = True  # set flag for depth_callback processing

    # show image with target outlined with a red rectangle
    cv2.imshow("Target", image)
    cv2.waitKey(3)

# This callback function handles processing Kinect depth image, looking for the depth value
# at the location of the center of the pink target.
def transform_cv_rect(rects):
    """Transform rects from the opencv minAreaRect format to our format.
    Step 1 of Figure 5 in the seglink paper.

    In cv2.minAreaRect, the w, h and theta values in the returned rect are not
    convenient to use (at least for me), so the oriented (or rotated) rectangle
    in the seglink algorithm is defined differently from cv2.

    Rect definition in seglink:
        1. The angle value between a side and the x-axis is:
               positive: if it rotates clockwise, with the y-axis increasing downwards.
               negative: if it rotates counter-clockwise.
           This is opposite to cv2, and it is only a personal preference.
        2. The width is the length of the side taking a smaller absolute angle
           with the x-axis.
        3. The theta value of a rect is the signed angle value between the
           width-side and the x-axis.
        4. To rotate a rect to the horizontal direction, just rotate its
           width-side horizontally, i.e., rotate it by an angle of theta
           using the cv2 method.
           (see the method rotate_oriented_bbox_to_horizontal for rotation detail)

    Args:
        rects: ndarray with shape = (5, ) or (N, 5).
    Return:
        transformed rects.
    """
    only_one = False
    if len(np.shape(rects)) == 1:
        rects = np.expand_dims(rects, axis=0)
        only_one = True
    assert np.shape(rects)[1] == 5, 'The shape of rects must be (N, 5), but meet %s' % (str(np.shape(rects)))

    rects = np.asarray(rects, dtype=np.float32).copy()
    num_rects = np.shape(rects)[0]
    for idx in xrange(num_rects):
        cx, cy, w, h, theta = rects[idx, ...]
        #assert theta < 0 and theta >= -90, "invalid theta: %f" % (theta)
        if abs(theta) > 45 or (abs(theta) == 45 and w < h):
            w, h = [h, w]
            theta = 90 + theta
        rects[idx, ...] = [cx, cy, w, h, theta]
    if only_one:
        return rects[0, ...]
    return rects
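A small usage illustration with made-up numbers makes the convention concrete: a cv2-style rect reported as 10 wide and 40 high at -80 degrees is really a near-horizontal box, so the transform swaps the sides and flips the angle into the seglink convention:

import numpy as np

# Made-up input in the cv2 format (cx, cy, w, h, theta), theta in [-90, 0).
r = transform_cv_rect(np.array([50., 50., 10., 40., -80.], dtype=np.float32))
# -> [50., 50., 40., 10., 10.]: the width is now the side closest to the
#    x-axis, and the positive theta means a clockwise rotation (y-axis down).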
def filterContoursFancy(contours, image=None):
    if len(contours) == 0:
        return []
    numContours = len(contours)
    areas = np.array([cv2.contourArea(contour) for contour in contours])
    boundingRects = [cv2.boundingRect(contour) for contour in contours]
    widths, heights, positions = boundingInfo(boundingRects)
    rotatedRects = [cv2.minAreaRect(contour) for contour in contours]
    if config.withOpenCV3:
        rotatedBoxes = [np.int0(cv2.boxPoints(rect)) for rect in rotatedRects]
    else:
        rotatedBoxes = [np.int0(cv2.cv.BoxPoints(rect)) for rect in rotatedRects]
    rotatedAreas = [cv2.contourArea(box) for box in rotatedBoxes]

    sizeScores = [size(area) for area in areas]
    ratioScores = ratios(widths, heights)
    rotationScores = [rotation(rect) for rect in rotatedRects]
    rectangularScores = [distToPolygon(contour, poly) for contour, poly in zip(contours, rotatedBoxes)]
    areaScores = polygonAreaDiff(areas, rotatedAreas)
    quadScores = [Quadrify(contour) for contour in contours]

    rectangularScores = np.divide(rectangularScores, widths)
    scores = np.array([sizeScores, ratioScores, rotationScores, rectangularScores, areaScores, quadScores])
    contourScores = np.dot(weights, scores)
    correctInds, incorrectInds = sortedInds(contourScores)
    correctContours = np.array(contours)[correctInds]

    if config.extra_debug:
        print "size, ratio, rotation, rectangular, area, quad"
        print "Weights:", weights
        print "Scores: ", contourScores
        print np.average(scores, axis=1)
        if len(incorrectInds) != 0:
            print "AVG, WORST", test(scores, correctInds, incorrectInds)
        for i in range(numContours):
            print "CONTOUR " + str(i)
            print np.multiply(scores[:, i], weights)  # newWeights
            print contourScores[i]
            if image:
                img = copy.deepcopy(image)
                Printing.drawImage(img, contours[:i] + contours[i+1:], contours[i], False)
                Printing.display(img, "contour " + str(i), doResize=True)
                cv2.waitKey(0)
                cv2.destroyAllWindows()
    return correctContours
def filterContoursAutocalibrate(contours, image=None):
    if len(contours) == 0:
        return []
    numContours = len(contours)
    areas = np.array([cv2.contourArea(contour) for contour in contours])
    boundingRects = [cv2.boundingRect(contour) for contour in contours]
    widths, heights, positions = boundingInfo(boundingRects)
    rotatedRects = [cv2.minAreaRect(contour) for contour in contours]
    if config.withOpenCV3:
        rotatedBoxes = [np.int0(cv2.boxPoints(rect)) for rect in rotatedRects]
    else:
        rotatedBoxes = [np.int0(cv2.cv.BoxPoints(rect)) for rect in rotatedRects]
    rotatedAreas = [cv2.contourArea(box) for box in rotatedBoxes]

    sizeScores = [size(area) for area in areas]
    ratioScores = ratios(widths, heights)
    rotationScores = [rotation(rect) for rect in rotatedRects]
    rectangularScores = [distToPolygon(contour, poly) for contour, poly in zip(contours, rotatedBoxes)]
    areaScores = polygonAreaDiff(areas, rotatedAreas)
    quadScores = [Quadrify(contour) for contour in contours]

    rectangularScores = np.divide(rectangularScores, widths)
    scores = np.array([sizeScores, ratioScores, rotationScores, rectangularScores, areaScores, quadScores])
    contourScores = np.dot(weights, scores)
    correctInds, incorrectInds = sortedInds(contourScores)
    correctContours = np.array(contours)[correctInds]

    averageScore = 0
    for i in range(numContours):
        averageScore += sizeScores[i]
        averageScore += ratioScores[i]
        averageScore += rotationScores[i]
        averageScore += rectangularScores[i]
        averageScore += areaScores[i]
        averageScore += quadScores[i]
    averageScore /= numContours
    return averageScore
def detect_barcode(imageval):
    # load the image and convert it to grayscale
    file_bytes = np.asarray(bytearray(imageval), dtype=np.uint8)
    img_data_ndarray = cv2.imdecode(file_bytes, cv2.CV_LOAD_IMAGE_UNCHANGED)
    gray = cv2.cvtColor(img_data_ndarray, cv2.COLOR_BGR2GRAY)

    # compute the Scharr gradient magnitude representation of the images
    # in both the x and y direction
    gradX = cv2.Sobel(gray, ddepth=cv2.cv.CV_32F, dx=1, dy=0, ksize=-1)
    gradY = cv2.Sobel(gray, ddepth=cv2.cv.CV_32F, dx=0, dy=1, ksize=-1)

    # subtract the y-gradient from the x-gradient
    gradient = cv2.subtract(gradX, gradY)
    gradient = cv2.convertScaleAbs(gradient)

    # blur and threshold the image
    blurred = cv2.blur(gradient, (9, 9))
    (_, thresh) = cv2.threshold(blurred, 225, 255, cv2.THRESH_BINARY)

    # construct a closing kernel and apply it to the thresholded image
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (21, 7))
    closed = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)

    # perform a series of erosions and dilations
    closed = cv2.erode(closed, None, iterations=4)
    closed = cv2.dilate(closed, None, iterations=4)

    # find the contours in the thresholded image, then sort the contours
    # by their area, keeping only the largest one
    (cnts, _) = cv2.findContours(closed.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    c = sorted(cnts, key=cv2.contourArea, reverse=True)[0]

    # compute the rotated bounding box of the largest contour
    rect = cv2.minAreaRect(c)
    box = np.int0(cv2.cv.BoxPoints(rect))

    # draw a bounding box around the detected barcode and display the image
    cv2.drawContours(img_data_ndarray, [box], -1, (0, 255, 0), 3)
    # cv2.imshow("Image", image)
    #cv2.imwrite("uploads/output-"+ datetime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S") +".jpg",image)
    # cv2.waitKey(0)

    #outputfile = "uploads/output-" + time.strftime("%H:%M:%S") + ".jpg"
    outputfile = "uploads/output.jpg"
    cv2.imwrite(outputfile, img_data_ndarray)