Python cv2 module: drawContours() example source code

The following 49 code examples, extracted from open-source Python projects, illustrate how to use cv2.drawContours().
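Before the project examples, here is a minimal, self-contained sketch of the pattern most of them follow. It assumes OpenCV 4.x, where cv2.findContours() returns two values (OpenCV 3.x returns three, as several snippets below show); the file name 'sample.png' and the threshold value 127 are placeholders.

import cv2

# Load an image and build a binary mask (file name and threshold are placeholders).
img = cv2.imread('sample.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
_, binary = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)

# OpenCV 4.x returns (contours, hierarchy); OpenCV 3.x returns (image, contours, hierarchy).
contours, hierarchy = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

# Draw all contours (index -1) in green with a 2-pixel line width;
# a negative thickness (cv2.FILLED) would fill the contours instead.
cv2.drawContours(img, contours, -1, (0, 255, 0), 2)
cv2.imwrite('contours_result.png', img)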

Project: object-detection-python-opencv    Author: hasanaliqureshi
def find_biggest_contour(image):
    # Copy
    image = image.copy()
    # cv2.findContours returns all contours; CHAIN_APPROX_SIMPLE compresses
    # horizontal, vertical, and diagonal segments and keeps only their end
    # points (an upright rectangular contour is encoded with just 4 points).
    # The optional hierarchy output describes the image topology, with one
    # element per contour; we don't need it here.
    _, contours, hierarchy = cv2.findContours(image, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)

    # Isolate largest contour
    contour_sizes = [(cv2.contourArea(contour), contour) for contour in contours]
    biggest_contour = max(contour_sizes, key=lambda x: x[0])[1]

    mask = np.zeros(image.shape, np.uint8)
    cv2.drawContours(mask, [biggest_contour], -1, 255, -1)
    return biggest_contour, mask
Project: illumeme    Author: josmcg
def find_triangles(filename):
    FIRST = 0
    RED = (0, 0, 255)
    THICKNESS = 3
    img = cv2.imread(filename)
    copy = img.copy()
    grey_img = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)
    ret, thresh = cv2.threshold(grey_img, 127, 255, 1)
    contours, h = cv2.findContours(thresh, 1, 2)
    largest = None
    for contour in contours:
        approx = cv2.approxPolyDP(contour,0.01*cv2.arcLength(contour,True),True)
        if len(approx) == 3:
            #triangle found
            if largest is None or cv2.contourArea(contour) > cv2.contourArea(largest):
                largest = contour

    #write file
    cv2.drawContours(copy, [largest], FIRST, RED, THICKNESS)
    cv2.imwrite(filename +"_result", copy)
Project: idmatch    Author: maddevsio
def remove_borders(image):
    ratio = image.shape[0] / 500.0
    orig = image.copy()
    image = resize(image, height=500)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (5, 5), 0)
    edged = cv2.Canny(gray, 75, 200)
    _, cnts, _ = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    cv2.imshow('edged', edged)
    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
    screenCnt = None
    for c in cnts:
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.02 * peri, True)
        print(len(approx) == 4)
        if len(approx) == 4:
            screenCnt = approx
            break
    if screenCnt is not None and len(screenCnt) > 0:
        cv2.drawContours(image, [screenCnt], -1, (0, 255, 0), 2)
        return four_point_transform(orig, screenCnt.reshape(4, 2) * ratio)
    return orig
Project: dust_repos    Author: taozhijiang
def img_contour_select(ctrs, im):
    # select candidate rectangle-like contours
    cand_rect = []
    for item in ctrs:
        epsilon = 0.02*cv2.arcLength(item, True)
        approx = cv2.approxPolyDP(item, epsilon, True)  
        if len(approx) <= 8:
            rect = cv2.minAreaRect(item)
            if rect[1][0] < 20 or rect[1][1] < 20:
                continue
            if rect[1][0] > 150 or rect[1][1] > 150:
                continue        
            #ratio = (rect[1][1]+0.00001) / rect[1][0]
            #if ratio > 1 or ratio < 0.9:
            #    continue
            box = cv2.boxPoints(rect)
            box_d = np.int0(box)
            cv2.drawContours(im, [box_d], 0, (0,255,0), 3)
            cand_rect.append(box)
    img_show_hook("????", im)   
    return cand_rect
Project: Vision2016    Author: Team3309
def profile_score(contour, binary):
    """
    Calculate a score based on the "profile" of the target, i.e. how closely its
    geometry matches the expected geometry of the goal.
    :param contour:
    :param binary:
    :return:
    """
    bounding = cv2.boundingRect(contour)
    pixels = np.zeros((binary.shape[0], binary.shape[1]))
    cv2.drawContours(pixels, [contour], -1, 255, -1)
    col_averages = np.mean(pixels, axis=0)[bounding[0]:bounding[0] + bounding[2]]
    row_averages = np.mean(pixels, axis=1)[bounding[1]:bounding[1] + bounding[3]]
    # normalize to between 0 and 1
    col_averages *= 1.0 / col_averages.max()
    row_averages *= 1.0 / row_averages.max()

    col_diff = np.subtract(col_averages, col_profile(col_averages.shape[0], bounding[2]))
    row_diff = np.subtract(row_averages, row_profile(row_averages.shape[0], bounding[3]))

    # average difference should be close to 0
    avg_diff = np.mean([np.mean(col_diff), np.mean(row_diff)])
    return 100 - (avg_diff * 50)
Project: 2017-robot    Author: frc1418
def find_contours(self, img):

        thresh_img = self.threshold(img)

        _, contours, _ = cv2.findContours(thresh_img, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
        result = []
        for cnt in contours:
            approx = cv2.approxPolyDP(cnt, 0.01*cv2.arcLength(cnt, True), True)

            if self.draw_approx:
                cv2.drawContours(self.out, [approx], -1, self.BLUE, 2, lineType=8)

            if len(approx) > 3 and len(approx) < 15:
                _, _, w, h = cv2.boundingRect(approx)
                if h > self.min_height and w > self.min_width:
                        hull = cv2.convexHull(cnt)
                        approx2 = cv2.approxPolyDP(hull,0.01*cv2.arcLength(hull,True),True)

                        if self.draw_approx2:
                            cv2.drawContours(self.out, [approx2], -1, self.GREEN, 2, lineType=8)

                        result.append(approx2)
        return result
Project: dust_repos    Author: taozhijiang
def img_contour_select(ctrs, im):
    # select candidate rectangle-like contours
    cand_rect = []
    for item in ctrs:
        epsilon = 0.02*cv2.arcLength(item, True)
        approx = cv2.approxPolyDP(item, epsilon, True)  
        if len(approx) <= 8:
            rect = cv2.minAreaRect(item)
            # filter out contours by rotation angle
            if rect[2] < -10 and rect[2] > -80:
                continue
            if rect[1][0] < 10 or rect[1][1] < 10:
                continue
            #ratio = (rect[1][1]+0.00001) / rect[1][0]
            #if ratio > 1 or ratio < 0.9:
            #    continue
            box = cv2.boxPoints(rect)
            box_d = np.int0(box)
            cv2.drawContours(im, [box_d], 0, (0,255,0), 3)
            cand_rect.append(box)
    img_show_hook("????", im)   
    return cand_rect
Project: PiStorms    Author: mindsensors
def findSquare( self,frame ):
        image = frame
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        blurred = cv2.GaussianBlur(gray, (7, 7), 0)
        edged = cv2.Canny(blurred, 60, 60)
        # find contours in the edge map
        (cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        # loop over our contours to find hexagon
        cnts = sorted(cnts, key = cv2.contourArea, reverse = True)[:50]
        screenCnt = None
        for c in cnts:
            # approximate the contour
            peri = cv2.arcLength(c, True)
            approx = cv2.approxPolyDP(c, 0.004 * peri, True)
            # if our approximated contour has at least four points, then
            # we can assume that we have found our square

            if len(approx) >= 4:
                screenCnt = approx
                x,y,w,h = cv2.boundingRect(c)
                cv2.drawContours(image, [approx], -1, (0, 0, 255), 1)
                #cv2.imshow("Screen", image)
                #create the mask and remove rest of the background
                mask = np.zeros(image.shape[:2], dtype = "uint8")
                cv2.drawContours(mask, [screenCnt], -1, 255, -1)
                masked = cv2.bitwise_and(image, image, mask = mask)
                #cv2.imshow("Masked",masked  )
                # crop the masked image so it can be compared to the reference image
                cropped = masked[y:y+h,x:x+w]
                # scale the image to the same fixed size as the reference image
                cropped = cv2.resize(cropped, (200,200), interpolation =cv2.INTER_AREA)

                return cropped
Project: pyku    Author: dubvulture
def extract_corners(self, image):
        """
        Find the 4 corners of a binary image
        :param image: binary image
        :return: 4 main vertices or None
        """
        cnts, _ = cv2.findContours(image.copy(),
                                   cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)[-2:]
        cnt = cnts[0]
        _, _, h, w = cv2.boundingRect(cnt)
        epsilon = min(h, w) * 0.5
        o_vertices = cv2.approxPolyDP(cnt, epsilon, True)
        vertices = cv2.convexHull(o_vertices, clockwise=True)
        vertices = self.correct_vertices(vertices)

        if self.debug:
            temp = cv2.cvtColor(image.copy(), cv2.COLOR_GRAY2BGR)
            cv2.drawContours(temp, cnts, -1, (0, 255, 0), 10)
            cv2.drawContours(temp, o_vertices, -1, (255, 0, 0), 30)
            cv2.drawContours(temp, vertices, -1, (0, 0, 255), 20)
            self.save2image(temp)

        return vertices
Project: AVSR-Deep-Speech    Author: pandeydivesh15
def visualize(frame, coordinates_list, alpha = 0.80, color=[255, 255, 255]):
    """
    Args:
        1. frame:               OpenCV's image which has to be visualized.
        2. coordinates_list:    List of coordinates which will be visualized in the given `frame`
        3. alpha, color:        Blending weight and fill color used for the overlay.
                                A convex hull is drawn for each element in the `coordinates_list`.
    """
    layer = frame.copy()
    output = frame.copy()

    for coordinates in coordinates_list:
        c_hull = cv2.convexHull(coordinates)
        cv2.drawContours(layer, [c_hull], -1, color, -1)

    cv2.addWeighted(layer, alpha, output, 1 - alpha, 0, output)
    cv2.imshow("Output", output)
Project: CE264-Computer_Vision    Author: RobinCPC
def find_contour(self, img_src, Rxmin, Rymin, Rxmax, Rymax):
        cv2.rectangle(img_src, (Rxmax, Rymax), (Rxmin, Rymin), (0, 255, 0), 0)
        crop_res = img_src[Rymin: Rymax, Rxmin:Rxmax]
        grey = cv2.cvtColor(crop_res, cv2.COLOR_BGR2GRAY)

        _, thresh1 = cv2.threshold(grey, 127, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

        cv2.imshow('Thresh', thresh1)
        contours, hierchy = cv2.findContours(thresh1.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)

        # draw contour on threshold image
        if len(contours) > 0:
            cv2.drawContours(thresh1, contours, -1, (0, 255, 0), 3)

        return contours, crop_res


# Check ConvexHull  and Convexity Defects
Project: OpenAI_Challenges    Author: AlwaysLearningDeeper
def process_img(img):
    original_image=img
    processed_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    processed_img = cv2.Canny(processed_img, threshold1=200, threshold2=300)
    processed_img = cv2.GaussianBlur(processed_img, (3,3), 0 )
    copy=processed_img
    vertices = np.array([[30, 240], [30, 100], [195, 100], [195, 240]])
    processed_img = roi(processed_img, np.int32([vertices]))
    verticesP = np.array([[30, 270], [30, 230], [197, 230], [197, 270]])
    platform = roi(copy, np.int32([verticesP]))
    #                       edges
    #lines = cv2.HoughLinesP(platform, 1, np.pi/180, 180,np.array([]), 3, 2)
    #draw_lines(processed_img,lines)
    #draw_lines(original_image,lines)

    #Platform lines
    #imgray = cv2.cvtColor(platform,cv2.COLOR_BGR2GRAY)
    ret,thresh = cv2.threshold(platform,127,255,0)
    im2, contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    cv2.drawContours(original_image, contours, -1, (0,255,0), 3)
    try:
        platformpos=contours[0][0][0]
    except:
        platformpos=[[0]]
    circles = cv2.HoughCircles(processed_img, cv2.HOUGH_GRADIENT, 1, 20,
                               param1=90, param2=5, minRadius=1, maxRadius=3)

    ballpos=draw_circles(original_image,circles=circles)

    return processed_img,original_image,platform,platformpos,ballpos
Project: piwall-cvtools    Author: infinnovation
def gimpMarkup(self, hints = gimpContours, image = "2x2-red-1.jpg", feature = "top-left-monitor"):
        r = Rectangle(*hints[image][feature])
        contour = r.asContour()
        cv2.drawContours(self.img, [contour], -1, (0, 255, 0), 5 )
        title = self.tgen.next(feature)
        if self.show: ImageViewer(self.img).show(window=title, destroy = self.destroy, info = self.info, thumbnailfn = title)
        roi = r.getRoi(self.img)
        self.rois[feature] = roi
        # Histogram the ROI to get the spread of intensities, in each channel and grayscale
        title = '%s-roi.jpg' % feature
        if self.show: ImageViewer(roi).show(window=title, destroy = self.destroy, info = self.info, thumbnailfn = title)
        colors = ('b','g','r')
        for i,col in enumerate(colors):
            hist = cv2.calcHist([roi], [i], None, [256], [0,256])
            plt.plot(hist, color = col)
            plt.xlim([0,256])
            #plt.hist(roi.ravel(), 256, [0,256])
        plt.show()
        cmap = ColorMapper(roi)
        cmap.mapit(1)
        title = self.tgen.next('colourMapping')
        if self.show: ImageViewer(self.img).show(window=title, destroy = self.destroy, info = self.info, thumbnailfn = title)
        cv2.waitKey()
Project: piwall-cvtools    Author: infinnovation
def locate(self, all = False, show = False, outimg = None):
        for (transition, mask) in self.transitions:
            if transition == 1:
                sfv3 = SquareFinderV3(mask, cos_limit = 0.5)
                squares = sfv3.find(self.mode)
                if show:
                    SquaresOverlayV4(mask, squares, all = all)
                    SquaresOverlayV4(mask, squares, all = False)
                else:
                    square_contours = [square.contour for square in squares]
                    best_contours_tuples = classify_multi_monitors_contour_set(square_contours)
                    found = mask.copy()
                    self.best_contours = [contour.astype('int32') for (contour, index) in best_contours_tuples]
                    cv2.drawContours( found, self.best_contours, -1, (0,0,255),3)
                    if outimg:
                        cv2.imwrite(outimg, found)
                return self.best_contours
Project: AR-BXT-AR4Python    Author: GeekLiB
def drawBox(self, img):
        axis = np.float32([[0,0,0], [0,1,0], [1,1,0], [1,0,0],
                          [0,0,-1],[0,1,-1],[1,1,-1],[1,0,-1] ])
        imgpts, jac = cv2.projectPoints(axis, self.RVEC, self.TVEC, self.MTX, self.DIST)
        imgpts = np.int32(imgpts).reshape(-1,2)

        # draw pillars in blue color
        for i,j in zip(range(4),range(4,8)):
            img2 = cv2.line(img, tuple(imgpts[i]), tuple(imgpts[j]),(255,0,0),3)

        # draw top layer in red color
        outImg = cv2.drawContours(img2, [imgpts[4:]],-1,(0,0,255),3)

        return outImg

# Debug Code.
Project: AR-BXT-AR4Python    Author: GeekLiB
def drawBox(self, img):
        axis = np.float32([[0,0,0], [0,1,0], [1,1,0], [1,0,0],
                          [0,0,-1],[0,1,-1],[1,1,-1],[1,0,-1] ])
        imgpts, jac = cv2.projectPoints(axis, self.RVEC, self.TVEC, self.MTX, self.DIST)
        imgpts = np.int32(imgpts).reshape(-1,2)

        # draw pillars in blue color
        for i,j in zip(range(4),range(4,8)):
            img2 = cv2.line(img, tuple(imgpts[i]), tuple(imgpts[j]),(255,0,0),3)

        # draw top layer in red color
        outImg = cv2.drawContours(img2, [imgpts[4:]],-1,(0,0,255),3)

        return outImg

# Debug Code.
Project: cervix-roi-segmentation-by-unet    Author: scottykwok
def cropCircle(img, resize=None):
    if resize:
        if (img.shape[0] > img.shape[1]):
            tile_size = (int(img.shape[1] * resize / img.shape[0]), resize)
        else:
            tile_size = (resize, int(img.shape[0] * resize / img.shape[1]))
        img = cv2.resize(img, dsize=tile_size, interpolation=cv2.INTER_CUBIC)
    else:
        tile_size = img.shape

    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY);
    _, thresh = cv2.threshold(gray, 10, 255, cv2.THRESH_BINARY)

    _, contours, _ = cv2.findContours(thresh.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)

    main_contour = sorted(contours, key=cv2.contourArea, reverse=True)[0]

    ff = np.zeros((gray.shape[0], gray.shape[1]), 'uint8')
    cv2.drawContours(ff, main_contour, -1, 1, 15)
    ff_mask = np.zeros((gray.shape[0] + 2, gray.shape[1] + 2), 'uint8')
    cv2.floodFill(ff, ff_mask, (int(gray.shape[1] / 2), int(gray.shape[0] / 2)), 1)

    rect = maxRect(ff)
    rectangle = [min(rect[0], rect[2]), max(rect[0], rect[2]), min(rect[1], rect[3]), max(rect[1], rect[3])]
    img_crop = img[rectangle[0]:rectangle[1], rectangle[2]:rectangle[3]]
    cv2.rectangle(ff, (min(rect[1], rect[3]), min(rect[0], rect[2])), (max(rect[1], rect[3]), max(rect[0], rect[2])), 3,
                  2)

    return [img_crop, rectangle, tile_size]
Project: pycolor_detection    Author: parth1993
def findSignificantContours(img, sobel_8u, sobel):
    image, contours, heirarchy = cv2.findContours(sobel_8u, \
                                                  cv2.RETR_EXTERNAL, \
                                                  cv2.CHAIN_APPROX_SIMPLE)
    mask = np.ones(image.shape[:2], dtype="uint8") * 255

    level1 = []
    for i, tupl in enumerate(heirarchy[0]):

        if tupl[3] == -1:
            tupl = np.insert(tupl, 0, [i])
            level1.append(tupl)
    significant = []
    tooSmall = sobel_8u.size * 10 / 100
    for tupl in level1:
        contour = contours[tupl[0]];
        area = cv2.contourArea(contour)
        if area > tooSmall:
            cv2.drawContours(mask, \
                             [contour], 0, (0, 255, 0), \
                             2, cv2.LINE_AA, maxLevel=1)
            significant.append([contour, area])
    significant.sort(key=lambda x: x[1])
    significant = [x[0] for x in significant];
    peri = cv2.arcLength(contour, True)
    approx = cv2.approxPolyDP(contour, 0.02 * peri, True)
    mask = sobel.copy()
    mask[mask > 0] = 0
    cv2.fillPoly(mask, significant, 255, 0)
    mask = np.logical_not(mask)
    img[mask] = 0;

    return img
Project: srcsim2017    Author: ZarjRobotics
def side_intersect(self, image, contours, row, markup=True):
        """ Find intersections to both sides along a row """
        if markup:
            cv2.line(image, (0, row), (image.shape[1], row), (0, 0, 255), 1)

        cnt_l, col_l = self.find_intersect(image, contours, row, -1)
        if markup and cnt_l is not None:
            cv2.drawContours(image, [contours[cnt_l]], -1, (0, 255, 255), -1)
            cv2.circle(image, (col_l, row), 4, (0, 255, 0), 2)

        cnt_r, col_r = self.find_intersect(image, contours, row, 1)
        if markup and cnt_r is not None:
            cv2.drawContours(image, [contours[cnt_r]], -1, (255, 255, 0), -1)
            cv2.circle(image, (col_r, row), 4, (0, 255, 0), 2)

        return (cnt_l, col_l), (cnt_r, col_r)
Project: quadrilaterals-rectifier    Author: michal2229
def extract_rect(im):
    imgray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)

    ret,thresh = cv2.threshold(imgray, 127, 255, 0)

    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    # finding contour with max area
    largest = None
    for cnt in contours:
        if largest is None or cv2.contourArea(cnt) > cv2.contourArea(largest):
            largest = cnt

    peri = cv2.arcLength(largest, True)
    appr = cv2.approxPolyDP(largest, 0.02 * peri, True)

    #cv2.drawContours(im, appr, -1, (0,255,0), 3)
    points_list = [[i[0][0], i[0][1]] for i in appr] 

    left  = sorted(points_list, key = lambda p: p[0])[0:2]
    right = sorted(points_list, key = lambda p: p[0])[2:4]

    print("l " + str(left))
    print("r " + str(right))

    lu = sorted(left, key = lambda p: p[1])[0]
    ld = sorted(left, key = lambda p: p[1])[1]

    ru = sorted(right, key = lambda p: p[1])[0]
    rd = sorted(right, key = lambda p: p[1])[1]

    print("lu " + str(lu))
    print("ld " + str(ld))
    print("ru " + str(ru))
    print("rd " + str(rd))

    lu_ = [ (lu[0] + ld[0])/2, (lu[1] + ru[1])/2 ]
    ld_ = [ (lu[0] + ld[0])/2, (ld[1] + rd[1])/2 ]
    ru_ = [ (ru[0] + rd[0])/2, (lu[1] + ru[1])/2 ]
    rd_ = [ (ru[0] + rd[0])/2, (ld[1] + rd[1])/2 ]

    print("lu_ " + str(lu_))
    print("ld_ " + str(ld_))
    print("ru_ " + str(ru_))
    print("rd_ " + str(rd_))

    src_pts = np.float32(np.array([lu, ru, rd, ld]))
    dst_pts = np.float32(np.array([lu_, ru_, rd_, ld_]))

    h,w,b = im.shape
    H, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)

    print("H" + str(H))

    imw =  cv2.warpPerspective(im, H, (w, h))

    return imw[lu_[1]:rd_[1], lu_[0]:rd_[0]] # cropping image
Project: idmatch    Author: maddevsio
def remove_border(contour, ary):
    """Remove everything outside a border contour."""
    # Use a rotated rectangle (should be a good approximation of a border).
    # If it's far from a right angle, it's probably two sides of a border and
    # we should use the bounding box instead.
    c_im = np.zeros(ary.shape)
    r = cv2.minAreaRect(contour)
    degs = r[2]
    if angle_from_right(degs) <= 10.0:
        box = cv2.boxPoints(r)
        box = np.int0(box)
        cv2.drawContours(c_im, [box], 0, 255, -1)
        cv2.drawContours(c_im, [box], 0, 0, 4)
    else:
        x1, y1, x2, y2 = cv2.boundingRect(contour)
        cv2.rectangle(c_im, (x1, y1), (x2, y2), 255, -1)
        cv2.rectangle(c_im, (x1, y1), (x2, y2), 0, 4)

    return np.minimum(c_im, ary)
Project: dataArtist    Author: radjkarl
def getMask(self, shape):

        p=self.state['pos']
        s=self.state['size']
        center=p + s / 2
        a=self.state['angle']
        # opencv convention:
        shape = (shape[1], shape[0])
        arr1 = np.zeros(shape, dtype=np.uint8)
        arr2 = np.zeros(shape, dtype=np.uint8)

        # draw rotated rectangle:
        vertices = np.int0(cv2.boxPoints((center, s, a)))
        cv2.drawContours(arr1, [vertices], 0, color=1, thickness=-1)
        # draw ellipse:
        cv2.ellipse(arr2, (int(center[0]), int(center[1])), (int(s[0] / 2 * self._ratioEllispeRectangle),
                     int(s[1] / 2 * self._ratioEllispeRectangle)), int(a),
                    startAngle=0, endAngle=360, color=1, thickness=-1)
        # bring both together:
        return np.logical_and(arr1, arr2).T
Project: dataArtist    Author: radjkarl
def getMask(self, shape):

        p = self.state['pos']
        s = self.state['size']
        center = p + s / 2
        a = self.state['angle']
        # opencv convention:
        shape = (shape[1], shape[0])
        arr = np.zeros(shape, dtype=np.uint8)
        # draw rotated rectangle:
        vertices = np.int0(cv2.boxPoints((center, s, a)))
        cv2.drawContours(arr, [vertices],
                         0,
                         color=1,
                         thickness=-1)
        return arr.astype(bool).T
Project: OpenCV2    Author: SarathM1
def lipSegment(self, img):
        # self.t1 = cv2.getTickCount()
        lipHull = self.dlib_obj.get_landmarks(img)
        cv2.drawContours(img, lipHull, -1, (255, 0, 0), 2)
        (x, y), (MA, ma), angle = cv2.fitEllipse(lipHull)
        a = ma/2
        b = MA/2

        eccentricity = sqrt(pow(a, 2)-pow(b, 2))
        eccentricity = round(eccentricity/a, 2)

        cv2.putText(img, 'E = '+str(round(eccentricity, 3)), (10, 350),
                    self.font, 1, (255, 0, 0), 1)

        if(eccentricity < 0.9):
            self.flags.cmd = 'b'
        else:
            self.flags.cmd = 'f'

        if angle < 80:
            self.flags.cmd = 'l'
        elif angle > 100:
            self.flags.cmd = 'r'

        cv2.putText(img, 'Cmd = ' + self.flags.cmd, (10, 300),  self.font,  1,
                    (0, 0, 255), 1, 16)
        # self.t2 = cv2.getTickCount()
        # print "Time = ", (self.t2-self.t1)/cv2.getTickFrequency()
        return img
Project: Artificial-Potential-Field    Author: vampcoder
def classify(img):
    cimg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img2 = cv2.medianBlur(cimg, 13)

    ret, thresh1 = cv2.threshold(cimg, 100, 120, cv2.THRESH_BINARY)
    t2 = copy.copy(thresh1)

    x, y = thresh1.shape
    arr = np.zeros((x, y, 3), np.uint8)
    final_contours = []
    image, contours, hierarchy = cv2.findContours(t2, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    #cv2.imshow('image', image)
    #k = cv2.waitKey(0)
    for i in range(len(contours)):
        cnt = contours[i]
        if cv2.contourArea(cnt) > 15000 and cv2.contourArea(cnt) < 35000:
            cv2.drawContours(img, [cnt], -1, [0, 255, 255])
            cv2.fillConvexPoly(arr, cnt, [255, 255, 255])
            final_contours.append(cnt)
    cv2.imshow('arr', arr)
    k = cv2.waitKey(0)
    return arr
Project: Artificial-Potential-Field    Author: vampcoder
def classify(img):
    cimg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img2 = cv2.medianBlur(cimg, 13)

    ret, thresh1 = cv2.threshold(cimg, 100, 120, cv2.THRESH_BINARY)
    t2 = copy.copy(thresh1)

    x, y = thresh1.shape
    arr = np.zeros((x, y, 3), np.uint8)
    final_contours = []
    image, contours, hierarchy = cv2.findContours(t2, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    #cv2.imshow('image', image)
    #k = cv2.waitKey(0)
    for i in range(len(contours)):
        cnt = contours[i]
        if cv2.contourArea(cnt) > 3600 and cv2.contourArea(cnt) < 25000:
            cv2.drawContours(img, [cnt], -1, [0, 255, 255])
            cv2.fillConvexPoly(arr, cnt, [255, 255, 255])
            final_contours.append(cnt)
    cv2.imshow('arr', arr)
    k = cv2.waitKey(0)
    return arr
Project: page_dewarp    Author: mzucker
def visualize_contours(name, small, cinfo_list):

    regions = np.zeros_like(small)

    for j, cinfo in enumerate(cinfo_list):

        cv2.drawContours(regions, [cinfo.contour], 0,
                         CCOLORS[j % len(CCOLORS)], -1)

    mask = (regions.max(axis=2) != 0)

    display = small.copy()
    display[mask] = (display[mask]/2) + (regions[mask]/2)

    for j, cinfo in enumerate(cinfo_list):
        color = CCOLORS[j % len(CCOLORS)]
        color = tuple([c/4 for c in color])

        cv2.circle(display, fltp(cinfo.center), 3,
                   (255, 255, 255), 1, cv2.LINE_AA)

        cv2.line(display, fltp(cinfo.point0), fltp(cinfo.point1),
                 (255, 255, 255), 1, cv2.LINE_AA)

    debug_show(name, 1, 'contours', display)
Project: page_dewarp    Author: mzucker
def visualize_spans(name, small, pagemask, spans):

    regions = np.zeros_like(small)

    for i, span in enumerate(spans):
        contours = [cinfo.contour for cinfo in span]
        cv2.drawContours(regions, contours, -1,
                         CCOLORS[i*3 % len(CCOLORS)], -1)

    mask = (regions.max(axis=2) != 0)

    display = small.copy()
    display[mask] = (display[mask]/2) + (regions[mask]/2)
    display[pagemask == 0] /= 4

    debug_show(name, 2, 'spans', display)
Project: histonets-cv    Author: sul-cidr
def remove_blobs(image, min_area=0, max_area=sys.maxsize, threshold=128,
                 method='8-connected', return_mask=False):
    """Binarize image using threshold, and remove (turn into black)
    blobs of connected pixels of white of size bigger or equal than
    min_area but smaller or equal than max_area from the original image,
    returning it afterward."""
    method = method.lower()
    if method == '4-connected':
        method = cv2.LINE_4
    elif method in ('16-connected', 'antialiased'):
        method = cv2.LINE_AA
    else:  # 8-connected
        method = cv2.LINE_8
    mono_image = binarize_image(image, method='boolean', threshold=threshold)
    _, all_contours, _ = cv2.findContours(mono_image, cv2.RETR_LIST,
                                          cv2.CHAIN_APPROX_SIMPLE)
    contours = np.array([contour for contour in all_contours
                         if min_area <= cv2.contourArea(contour) <= max_area])
    mask = np.ones(mono_image.shape, np.uint8)
    cv2.drawContours(mask, contours, -1, 0, -1, lineType=method)
    return image, 255 * mask
Project: PAN-Card-OCR    Author: dilippuri
def remove_border(contour, ary):
    """Remove everything outside a border contour."""
    # Use a rotated rectangle (should be a good approximation of a border).
    # If it's far from a right angle, it's probably two sides of a border and
    # we should use the bounding box instead.
    c_im = np.zeros(ary.shape)
    r = cv2.minAreaRect(contour)
    degs = r[2]
    if angle_from_right(degs) <= 10.0:
        box = cv2.cv.BoxPoints(r)
        box = np.int0(box)
        cv2.drawContours(c_im, [box], 0, 255, -1)
        cv2.drawContours(c_im, [box], 0, 0, 4)
    else:
        x1, y1, x2, y2 = cv2.boundingRect(contour)
        cv2.rectangle(c_im, (x1, y1), (x2, y2), 255, -1)
        cv2.rectangle(c_im, (x1, y1), (x2, y2), 0, 4)

    return np.minimum(c_im, ary)
Project: Farmbot_GeneralAP    Author: SpongeYao
def findContours(arg_img,arg_canvas, arg_MinMaxArea=False, arg_debug= False):
    image= arg_img.copy()
    #print image
    canvas= arg_canvas.copy()
    if len(image.shape) == 3:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    if sys.version_info.major == 2: 
        ctrs, hier = cv2.findContours(image.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    else:
        _, ctrs, hier = cv2.findContours(image.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    if arg_MinMaxArea is not False:
        ctrs = list(filter(lambda x: arg_MinMaxArea[1] > cv2.contourArea(x) > arg_MinMaxArea[0], ctrs))

    print('>>> {}'.format(len(ctrs)))
    for ctr in ctrs:
        print('Area: {}'.format(cv2.contourArea(ctr)))
        cv2.drawContours(canvas, [ctr], 0, (0, 128, 255), 3)
    if arg_debug:
        cv2.imwrite('Debug/debug_findContours.jpg',canvas)
    return canvas
Project: Farmbot_GeneralAP    Author: SpongeYao
def get_contour(self, arg_frame, arg_export_index, arg_export_path, arg_export_filename, arg_binaryMethod):
        # Otsu's thresholding after Gaussian filtering
        tmp = cv2.cvtColor(arg_frame, cv2.COLOR_RGB2GRAY)
        blur = cv2.GaussianBlur(tmp,(5,5),0)
        if arg_binaryMethod== 0:
            ret, thresholdedImg= cv2.threshold(blur.copy() , self.threshold_graylevel, 255 , 0)
        elif arg_binaryMethod == 1:
            ret,thresholdedImg = cv2.threshold(blur.copy(),0 ,255 ,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
        elif arg_binaryMethod== 2:
            thresholdedImg = cv2.adaptiveThreshold(blur.copy(),255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,5,0)

        result = cv2.cvtColor(thresholdedImg, cv2.COLOR_GRAY2RGB)
        ctrs, hier = cv2.findContours(thresholdedImg, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

        ctrs = filter(lambda x : cv2.contourArea(x) > self.threshold_size , ctrs)

        rects = [[cv2.boundingRect(ctr) , ctr] for ctr in ctrs]

        for rect , cntr in rects:
            cv2.drawContours(result, [cntr], 0, (0, 128, 255), 3)
        if arg_export_index:
            cv2.imwrite(arg_export_path+ arg_export_filename+'.jpg', result)
        print "Get Contour success"
        return result
Project: AMBR    Author: Algomorph
def draw_silhouette(self, foreground, bin_mask, tracked_object_stats, centroid):
        contours = cv2.findContours(bin_mask, mode=cv2.RETR_LIST, method=cv2.CHAIN_APPROX_SIMPLE)[1]
        for i_contour in range(0, len(contours)):
            cv2.drawContours(foreground, contours, i_contour, (0, 255, 0))
        x1 = tracked_object_stats[cv2.CC_STAT_LEFT]
        x2 = x1 + tracked_object_stats[cv2.CC_STAT_WIDTH]+1
        y1 = tracked_object_stats[cv2.CC_STAT_TOP]
        y2 = y1 + tracked_object_stats[cv2.CC_STAT_HEIGHT]+1
        if SilhouetteExtractor.DRAW_BBOX:
            cv2.rectangle(foreground, (x1, y1), (x2, y2), color=(0, 0, 255))
            cv2.drawMarker(foreground, SilhouetteExtractor.__to_int_tuple(centroid), (0, 0, 255), cv2.MARKER_CROSS, 11)
            bbox_w_h_ratio = tracked_object_stats[cv2.CC_STAT_WIDTH] / tracked_object_stats[cv2.CC_STAT_HEIGHT]
            cv2.putText(foreground, "BBOX w/h ratio: {0:.4f}".format(bbox_w_h_ratio), (x1, y1 - 18),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255))
        if SilhouetteExtractor.SHOW_INTERSECTS:
            if self.intersects_frame_boundary(x1, x2, y1, y2):
                cv2.putText(foreground, "FRAME BORDER INTERSECT DETECTED", (0, 54), cv2.FONT_HERSHEY_SIMPLEX, 0.8,
                            (0, 0, 255))
Project: Stronghold-2016-Vision    Author: team4099
def get_kinect_angles(image):
    """
    Gets angle to goal given an opencv image.
    Parameters:
        :param: `image` - an opencv image
    """
    # print(image)
    cv2.imwrite("out/thing.png", image)
    thresholded_image = threshold_image_for_tape(numpy.copy(image))
    cv2.imwrite("out/threshold.png", thresholded_image)
    contours, box = get_contours(thresholded_image)
    # total_image = cv2.drawContours(image, [contours], -1, (0, 0, 0))
    # random_number = str(int(random.random() * 100))
    # print("random number:", random_number)
    # cv2.imwrite("out/total_image" + random_number + ".png", total_image)
    corners = get_corners_from_contours(contours)
    return get_angles_to_goal(get_top_center(corners), image)
Project: Vision_Processing-2016    Author: Sabercat-Robotics-4146-FRC
def get_bounding_rect( cap, win_cap, win, upper, lower):
    msk = cv2.dilate(cv2.erode( cv2.inRange( cv2.blur( cv2.cvtColor( cap, cv2.COLOR_BGR2HSV ), (5,5) ), np.array(lower), np.array(upper) ), None, iterations=3), None, iterations=3)
    im2, contours, hierarchy = cv2.findContours( msk, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE )
    if len(contours) > 0:
        areas = [cv2.contourArea(c) for c in contours] # get the area of each contour
        max_index = np.argmax(areas) # get the index of the largest contour by area
        cnts = contours[max_index] # get the largest contout by area
        cv2.drawContours(msk, [cnts], 0, (0,255,0), 3) # Draw the contours to the mask image
        x,y,w,h = cv2.boundingRect(cnts) #  get the bouding box information about the contour
        cv2.rectangle(win_cap,(x,y),(x+w,y+h),(255,255,255),2) # Draw rectangle on the image to represent the bounding box
        cv2.imshow( "debug.", win_cap )
        try:
            self.smt_dash.putNumber('vis_x', x)
            self.smt_dash.putNumber('vis_y', y)
            self.smt_dash.putNumber('vis_w', w)
            self.smt_dash.putNumber('vis_h', h)
        except Exception:
            pass
Project: Sign-Language-Recognition    Author: Anmol-Singh-Jaggi
def draw_contours(frame):
    """
    Draws a contour around white color.
    """
    print("Drawing contour around white color...")

    # 'contours' is a list of contours found.
    contours, _ = cv2.findContours(
        frame, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    # Finding the contour with the greatest area.
    largest_contour_index = find_largest_contour_index(contours)

    # Draw the largest contour in the image.
    cv2.drawContours(frame, contours,
                     largest_contour_index, (255, 255, 255), thickness=-1)

    # Draw a rectangle around the contour perimeter
    contour_dimensions = cv2.boundingRect(contours[largest_contour_index])
    # cv2.rectangle(sign_image,(x,y),(x+w,y+h),(255,255,255),0,8)

    print("Done!")
    return (frame, contour_dimensions)
Project: card-scanner    Author: RFVenter
def get_contours(image, polydb=0.03, contour_range=5, show=False):
    # find the contours in the edged image, keeping only the largest ones, and initialize the screen contour
    # if cv2version == 3: im2, contours, hierarchy = cv2.findContours(image.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    contours = _findContours(image, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    cnts = sorted(contours, key = cv2.contourArea, reverse = True)[:contour_range]

    # loop over the contours
    screenCnt = None
    for c in cnts:
        # approximate the contour
        peri = cv2.arcLength(c, True) #finds the Contour Perimeter
        approx = cv2.approxPolyDP(c, polydb * peri, True)

        # if our approximated contour has four points, then we can assume that we have found our screen
        if len(approx) == 4:
            screenCnt = approx
            break

    if screenCnt is None: raise EdgeNotFound()

    # sometimes the algorithm finds a strange non-convex shape; it conforms to the card but is not complete, so complete the shape into a convex form
    if not cv2.isContourConvex(screenCnt):
        screenCnt = cv2.convexHull(screenCnt)
        x,y,w,h = cv2.boundingRect(screenCnt)
        screenCnt = np.array([[[x, y]], [[x+w, y]], [[x+w, y+h]], [[x, y+h]]])

    if show: # this is for debugging purposes
        new_image = image.copy()
        cv2.drawContours(new_image, [screenCnt], -1, (255, 255, 0), 2)
        cv2.imshow("Contour1 image", new_image)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

    return screenCnt
Project: card-scanner    Author: RFVenter
def get_contours(image, polydb=0.03, contour_range=5, show=False):
    # find the contours in the edged image, keeping only the largest ones, and initialize the screen contour
    if cv2version == 3: im2, contours, hierarchy = cv2.findContours(image.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    cnts = sorted(contours, key = cv2.contourArea, reverse = True)[:contour_range]

    # loop over the contours
    screenCnt = None
    for c in cnts:
        # approximate the contour
        peri = cv2.arcLength(c, True) #finds the Contour Perimeter
        approx = cv2.approxPolyDP(c, polydb * peri, True)

        # if our approximated contour has four points, then we can assume that we have found our screen
        if len(approx) == 4:
            screenCnt = approx
            break

    if screenCnt is None: raise EdgeNotFound()

    # sometimes the algorithm finds a strange non-convex shape; it conforms to the card but is not complete, so complete the shape into a convex form
    if not cv2.isContourConvex(screenCnt):
        screenCnt = cv2.convexHull(screenCnt)
        x,y,w,h = cv2.boundingRect(screenCnt)
        screenCnt = np.array([[[x, y]], [[x+w, y]], [[x+w, y+h]], [[x, y+h]]])

    if show: # this is for debugging purposes
        new_image = image.copy()
        cv2.drawContours(new_image, [screenCnt], -1, (255, 255, 0), 2)
        cv2.imshow("Contour1 image", new_image)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

    return screenCnt
Project: card-scanner    Author: RFVenter
def get_contours(image, polydb=0.1, contour_range=7, show=False):
    # find the contours in the edged image, keeping only the largest ones, and initialize the screen contour
    contours = _findContours(image, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    cnts = sorted(contours, key = cv2.contourArea, reverse = True)[:contour_range]

    # loop over the contours
    screenCnt = None
    for c in cnts:
        # approximate the contour
        peri = cv2.arcLength(c, True) #finds the Contour Perimeter
        approx = cv2.approxPolyDP(c, polydb * peri, True)

        # if our approximated contour has four points, then we can assume that we have found our screen
        if len(approx) == 4:
            screenCnt = approx
            break

    if screenCnt is None:
        raise EdgeNotFound()

    # sometimes the algorithm finds a strange non-convex shape; it conforms to the card but is not complete, so complete the shape into a convex form
    if not cv2.isContourConvex(screenCnt):
        screenCnt = cv2.convexHull(screenCnt)
        x,y,w,h = cv2.boundingRect(screenCnt)
        screenCnt = numpy.array([[[x, y]], [[x+w, y]], [[x+w, y+h]], [[x, y+h]]])

    if show: # this is for debugging purposes
        new_image = image.copy()
        cv2.drawContours(new_image, [screenCnt], -1, (255, 255, 0), 2)
        cv2.imshow("Contour1 image", new_image)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

    return screenCnt
Project: card-scanner    Author: RFVenter
def get_contours(image, polydb=0.03, contour_range=7, show=False):
    # find the contours in the edged image, keeping only the largest ones, and initialize the screen contour
    # if cv2version == 3: im2, contours, hierarchy = cv2.findContours(image.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    contours = _findContours(image, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    cnts = sorted(contours, key = cv2.contourArea, reverse = True)[:contour_range]

    # loop over the contours
    screenCnt = None
    for c in cnts:
        # approximate the contour
        peri = cv2.arcLength(c, True) #finds the Contour Perimeter
        approx = cv2.approxPolyDP(c, polydb * peri, True)

        # if our approximated contour has four points, then we can assume that we have found our screen
        if len(approx) == 4:
            screenCnt = approx
            break

    if screenCnt is None: raise EdgeNotFound()

    # sometimes the algorithm finds a strange non-convex shape; it conforms to the card but is not complete, so complete the shape into a convex form
    if not cv2.isContourConvex(screenCnt):
        screenCnt = cv2.convexHull(screenCnt)
        x,y,w,h = cv2.boundingRect(screenCnt)
        screenCnt = numpy.array([[[x, y]], [[x+w, y]], [[x+w, y+h]], [[x, y+h]]])

    if show: # this is for debugging purposes
        new_image = image.copy()
        cv2.drawContours(new_image, [screenCnt], -1, (255, 255, 0), 2)
        cv2.imshow("Contour1 image", new_image)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

    return screenCnt
Project: intel-cervical-cancer    Author: wangg12
def cropCircle(img):
    '''
    Many of the images were thresholded when they were taken, so the content
    appears as a circle surrounded by black. This function finds the largest
    inscribed rectangle in the thresholded image and then crops the image to
    that rectangle.

    input: img - an OpenCV image (numpy array)

    return: img_crop, rectangle, tile_size
    '''
    if(img.shape[0] > img.shape[1]):
        tile_size = (int(img.shape[1]*256/img.shape[0]),256)
    else:
        tile_size = (256, int(img.shape[0]*256/img.shape[1]))

    img = cv2.resize(img, dsize=tile_size)

    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY);
    _, thresh = cv2.threshold(gray, 10, 255, cv2.THRESH_BINARY)

    _, contours, _ = cv2.findContours(thresh.copy(),cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)

    main_contour = sorted(contours, key = cv2.contourArea, reverse = True)[0]

    ff = np.zeros((gray.shape[0],gray.shape[1]), 'uint8')
    cv2.drawContours(ff, main_contour, -1, 1, 15)
    ff_mask = np.zeros((gray.shape[0]+2,gray.shape[1]+2), 'uint8')
    cv2.floodFill(ff, ff_mask, (int(gray.shape[1]/2), int(gray.shape[0]/2)), 1)

    rect = maxRect(ff)
    rectangle = [min(rect[0],rect[2]), max(rect[0],rect[2]), min(rect[1],rect[3]), max(rect[1],rect[3])]
    img_crop = img[rectangle[0]:rectangle[1], rectangle[2]:rectangle[3]]
    cv2.rectangle(ff,(min(rect[1],rect[3]),min(rect[0],rect[2])),(max(rect[1],rect[3]),max(rect[0],rect[2])),3,2)

    return [img_crop, rectangle, tile_size]
Project: intel-cervical-cancer    Author: wangg12
def cropCircle(img):
    '''
    Many of the images were thresholded when they were taken, so the content
    appears as a circle surrounded by black. This function finds the largest
    inscribed rectangle in the thresholded image and then crops the image to
    that rectangle.

    input: img - an OpenCV image (numpy array)

    return: img_crop, rectangle, tile_size
    '''
    if(img.shape[0] > img.shape[1]):
        tile_size = (int(img.shape[1]*256/img.shape[0]),256)
    else:
        tile_size = (256, int(img.shape[0]*256/img.shape[1]))

    img = cv2.resize(img, dsize=tile_size)

    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY);
    _, thresh = cv2.threshold(gray, 10, 255, cv2.THRESH_BINARY)

    _, contours, _ = cv2.findContours(thresh.copy(),cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)

    main_contour = sorted(contours, key = cv2.contourArea, reverse = True)[0]

    ff = np.zeros((gray.shape[0],gray.shape[1]), 'uint8')
    cv2.drawContours(ff, main_contour, -1, 1, 15)
    ff_mask = np.zeros((gray.shape[0]+2,gray.shape[1]+2), 'uint8')
    cv2.floodFill(ff, ff_mask, (int(gray.shape[1]/2), int(gray.shape[0]/2)), 1)

    rect = maxRect(ff)
    rectangle = [min(rect[0],rect[2]), max(rect[0],rect[2]), min(rect[1],rect[3]), max(rect[1],rect[3])]
    img_crop = img[rectangle[0]:rectangle[1], rectangle[2]:rectangle[3]]
    cv2.rectangle(ff,(min(rect[1],rect[3]),min(rect[0],rect[2])),(max(rect[1],rect[3]),max(rect[0],rect[2])),3,2)

    return [img_crop, rectangle, tile_size]
Project: Robo-Plot    Author: JackBuck
def mask_using_contours(img, contours):
    """
    Return a copy of the supplied image, where all regions outside the supplied contours have been masked to white.

    Args:
        img (np.ndarray): the original image
        contours (list[np.ndarray]): a list of contours to use when masking

    Returns:
        np.ndarray: the masked image
    """
    img = img.copy()
    mask = np.zeros(img.shape, np.uint8)
    cv2.drawContours(mask, contours, contourIdx=-1, color=255, thickness=-1)
    img[np.where(mask == 0)] = 255
    return img
Project: SharkCV    Author: hammerhead226
def contours_draw(self, frame, **kwargs):
        if 'start' not in kwargs:
            kwargs['start'] = 0
        if 'end' not in kwargs:
            kwargs['end'] = len(self.contours) - 1
        if 'color' not in kwargs:
            kwargs['color'] = (0, 255, 0)
        if 'width' not in kwargs:
            kwargs['width'] = 2
        contours = [cnt.ndarray for cnt in self.contours][kwargs['start']:kwargs['end'] + 1]
        if len(contours) > 0:
            cv2.drawContours(frame.ndarray, contours, -1, kwargs['color'], kwargs['width'])
            return True
        return False

    # Dilate this mask's white region
Project: Image-Processing-and-Steganogrphy    Author: motkeg
def Q2():
    cap = cv2.VideoCapture(0)

    while(cap.isOpened() ):
        ret = cap.set(3,320)
        ret = cap.set(4,240)
        ret, frame = cap.read()

        if ret==True:
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            x,thresh = cv2.threshold(gray,137,255,1)
            contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
            cv2.drawContours(frame, contours,-1, (0,255,0), 3)
            cv2.imshow('Image with contours',frame)    
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        else:
            break

    cap.release()
    cv2.destroyAllWindows()
Project: FaceSwap    Author: Aravind-Suresh
def get_contour_mask(dshape, img_fl):
    mask = np.zeros(dshape)
    hull = cv2.convexHull(img_fl)
    cv2.drawContours(mask, [hull], 0, (1, 1, 1) , -1)
    return np.uint8(mask)

# Orients input_ mask onto tmpl_ face
Project: Millennium-Eye    Author: Elysium1937
def sizeFiltering(contours):
    #this function filters out the smaller retroreflector (as well as any noise) by size
    #_, contours, _ = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    """blank_image = np.zeros((img.shape[0],img.shape[1],3), np.uint8)
    cv2.drawContours(blank_image, contours, -1, (255, 255, 255))
    cv2.imshow("imagia", blank_image)
    cv2.waitKey()"""
    if len(contours) == 0:
        print "errorrrrr"
        return 0
    big = contours[0]
    for c in contours:
        if isinstance(c, np.ndarray) and isinstance(big, np.ndarray):
            if cv2.contourArea(c) > cv2.contourArea(big):
                big = c
        else:
            print type(c), type(big)
            return 0
    """blank_image = np.zeros((img.shape[0],img.shape[1],3), np.uint8)
    cv2.drawContours(blank_image, big, -1, (255, 255, 255))
    cv2.imshow("imagia", blank_image)
    cv2.waitKey()"""
    """blank_image = np.zeros((img.shape[0],img.shape[1],3), np.uint8)
    cv2.drawContours(blank_image, big, -1, (255, 255, 255))"""
    x,y,w,h = cv2.boundingRect(big)
    """cv2.rectangle(blank_image, (x,y), (x+w, y+h), (255,255,255))
    cv2.imshow("rect", blank_image)
    cv2.waitKey()"""
    return big
Project: Millennium-Eye    Author: Elysium1937
def shapeFiltering(img):
    contours = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[0]
    if len(contours) == 0:
        return "yoopsie"
    #else:
        #print contours
    """blank_image = np.zeros((img.shape[0],img.shape[1],3), np.uint8)
    cv2.drawContours(blank_image, contours, -1, (255, 255, 255))
    cv2.imshow("imagiae", blank_image)
    cv2.waitKey()"""
    good_shape = []
    for c in contours:
        x,y,w,h = cv2.boundingRect(c)
        """rect = cv2.minAreaRect(contour)
        box = cv2.boxPoints(rect)
        box = np.int0(box)
        w = """
        #if h == 0:
        #    continue
        ratio = w / h
        ratio_grade = ratio / (TMw / TMh)
        if 0.2 < ratio_grade < 1.8:
            good_shape.append(c)
    """blank_image = np.zeros((img.shape[0],img.shape[1],3), np.uint8)
    cv2.drawContours(blank_image, good_shape, -1, (255, 255, 255))
    cv2.imshow("imagia", blank_image)
    cv2.waitKey()"""
    return good_shape
Project: Millennium-Eye    Author: Elysium1937
def findCorners(contour):
    """blank_image = np.zeros((img.shape[0],img.shape[1],3), np.uint8)
    cv2.drawContours(blank_image, contour, -1, (255, 255, 255))
    rows,cols = img.shape[0], img.shape[1]
    M = cv2.getRotationMatrix2D((cols/2,rows/2),-45,0.5)
    dst = cv2.warpAffine(blank_image,M,(cols,rows))
    cv2.imshow("rotatio", dst)
    cv2.waitKey()"""
    rect = cv2.minAreaRect(contour)
    box = cv2.boxPoints(rect)
    box = np.int0(box)
    height_px_1 = box[0][1] - box[3][1]
    height_px_2 = box[1][1] - box[2][1]
    print height_px_1, height_px_2
    if height_px_1 < height_px_2:
        close_height_px = height_px_2
        far_height_px = height_px_1
    else:
        close_height_px = height_px_1
        far_height_px = height_px_2

    return close_height_px, far_height_px