The following 50 code examples, extracted from open-source Python projects, demonstrate how to use cv2.erode().
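Before the project examples, here is a minimal sketch of the call itself. The input path 'mask.png' is a placeholder; passing None as the kernel would make OpenCV fall back to its default 3x3 rectangular structuring element.

import cv2
import numpy as np

# 'mask.png' is a placeholder path for any grayscale or binary image.
img = cv2.imread('mask.png', cv2.IMREAD_GRAYSCALE)

# An explicit 3x3 rectangular structuring element; cv2.erode(img, None)
# would use the same default kernel.
kernel = np.ones((3, 3), np.uint8)

# Each iteration shrinks bright regions by roughly one kernel radius,
# removing speckle noise smaller than the kernel.
eroded = cv2.erode(img, kernel, iterations=1)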
def _extract_spots(self) -> None:
    # Dilate and erode to 'clean' the spot (nb that this harms the number
    # itself, so we only do it to extract spots)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    img = cv2.dilate(self._img, kernel, iterations=1)
    img = cv2.erode(img, kernel, iterations=2)
    img = cv2.dilate(img, kernel, iterations=1)

    # Perform a simple blob detect
    params = cv2.SimpleBlobDetector_Params()
    params.filterByArea = True
    params.minArea = 20  # The dot in 20pt font has area of about 30
    params.filterByCircularity = True
    params.minCircularity = 0.7
    params.filterByConvexity = True
    params.minConvexity = 0.8
    params.filterByInertia = True
    params.minInertiaRatio = 0.4
    detector = cv2.SimpleBlobDetector_create(params)
    self.spot_keypoints = detector.detect(img)

    # Log intermediate image
    img_with_keypoints = cv2.drawKeypoints(img, self.spot_keypoints, outImage=np.array([]),
                                           color=(0, 0, 255),
                                           flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    self.intermediate_images.append(NamedImage(img_with_keypoints, 'Spot Detection Image'))
def apply_filters(self, image, denoise=False):
    """ This method applies the required filters to the extracted
        regions of interest. Every square in a sudoku grid is
        considered a region of interest, since it can potentially
        contain a value. """
    # Convert to grayscale
    source_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Denoise the grayscale image if requested in the params
    if denoise:
        denoised_gray = cv2.fastNlMeansDenoising(source_gray, None, 9, 13)
        source_blur = cv2.GaussianBlur(denoised_gray, BLUR_KERNEL_SIZE, 3)
        # source_blur = denoised_gray
    else:
        source_blur = cv2.GaussianBlur(source_gray, (3, 3), 3)
    source_thresh = cv2.adaptiveThreshold(source_blur, 255, 0, 1, 5, 2)
    kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
    source_eroded = cv2.erode(source_thresh, kernel, iterations=1)
    source_dilated = cv2.dilate(source_eroded, kernel, iterations=1)
    if ENABLE_PREVIEW_ALL:
        image_preview(source_dilated)
    return source_dilated
def calculate_entropy(image):
    entropy = image.copy()
    sum = 0
    i = 0
    j = 0
    # compute an entropy-like score over 10x10 blocks
    while i < entropy.shape[0]:
        j = 0
        while j < entropy.shape[1]:
            sub_image = entropy[i:i+10, j:j+10]
            histogram = cv2.calcHist([sub_image], [0], None, [256], [0, 256])
            sum = 0
            for k in range(256):
                if histogram[k] != 0:
                    sum = sum + (histogram[k] * math.log(histogram[k]))
            entropy[i:i+10, j:j+10] = sum
            j = j + 10
        i = i + 10
    ret2, th2 = cv2.threshold(entropy, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    newfin = cv2.erode(th2, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)), iterations=1)
    return newfin
def add_blobs(crop_frame):
    frame = cv2.GaussianBlur(crop_frame, (3, 3), 0)
    # Convert BGR to HSV
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # define range of green color in HSV
    lower_green = np.array([70, 50, 50])
    upper_green = np.array([85, 255, 255])
    # Threshold the HSV image to get only green colors
    mask = cv2.inRange(hsv, lower_green, upper_green)
    mask = cv2.erode(mask, None, iterations=1)
    mask = cv2.dilate(mask, None, iterations=1)
    # Bitwise-AND mask and original image
    res = cv2.bitwise_and(frame, frame, mask=mask)
    # `params` (a cv2.SimpleBlobDetector_Params instance) is expected to be
    # configured elsewhere, e.g. at module level.
    detector = cv2.SimpleBlobDetector_create(params)
    # Detect blobs.
    reversemask = 255 - mask
    keypoints = detector.detect(reversemask)
    if keypoints:
        print("found blobs")
        if len(keypoints) > 4:
            keypoints.sort(key=(lambda s: s.size))
            keypoints = keypoints[0:3]
        # Draw detected blobs as red circles.
        # cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the
        # circle corresponds to the size of blob
        im_with_keypoints = cv2.drawKeypoints(frame, keypoints, np.array([]), (0, 0, 255),
                                              cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    else:
        print("no blobs")
        im_with_keypoints = crop_frame
    return im_with_keypoints  # , max_blob_dist, blob_center, keypoint_in_orders
def dif_gaus(image, lower, upper):
    lower, upper = int(lower-1), int(upper-1)
    lower = cv2.GaussianBlur(image, ksize=(lower, lower), sigmaX=0)
    upper = cv2.GaussianBlur(image, ksize=(upper, upper), sigmaX=0)
    # upper += 50
    # lower += 50
    dif = lower - upper
    # dif *= .1
    # dif = cv2.medianBlur(dif,3)
    # dif = 255-dif
    dif = cv2.inRange(dif, np.asarray(200), np.asarray(256))
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
    dif = cv2.dilate(dif, kernel, iterations=2)
    dif = cv2.erode(dif, kernel, iterations=1)
    # dif = cv2.max(image,dif)
    # dif = cv2.dilate(dif, kernel, iterations=1)
    return dif
def random_z_rotation(rgb, depth, pose, camera):
    rotation = random.uniform(-180, 180)
    rotation_matrix = Transform()
    rotation_matrix.set_rotation(0, 0, math.radians(rotation))
    pixel = center_pixel(pose, camera)
    new_rgb = rotate_image(rgb, rotation, pixel[0])
    new_depth = rotate_image(depth, rotation, pixel[0])
    # a threshold below 50 removes some interpolation noise, which covers small holes
    mask = (new_depth >= 50).astype(np.uint8)[:, :, np.newaxis]
    rgb_mask = np.all(new_rgb != 0, axis=2).astype(np.uint8)
    kernel = np.array([[0, 1, 0],
                       [1, 1, 1],
                       [0, 1, 0]], np.uint8)
    # erode the rest of the interpolation noise, which would negatively affect future blendings
    eroded_mask = cv2.erode(mask, kernel, iterations=2)
    eroded_rgb_mask = cv2.erode(rgb_mask, kernel, iterations=2)
    new_depth = new_depth * eroded_mask
    new_rgb = new_rgb * eroded_rgb_mask[:, :, np.newaxis]
    new_pose = combine_view_transform(pose, rotation_matrix)
    return new_rgb, new_depth, new_pose
def enhance(img, blockSize=8, boxSize=4):
    """image enhancement
    return: enhanced image
    """
    # img = cv2.equalizeHist(np.uint8(img))
    img, imgfore = segmentation(img)
    # img = blockproc(np.uint8(img), cv2.equalizeHist, (16, 16))
    img = img.copy(order='C').astype(np.float64)
    theta = _pre.calcDirectionBox(img, blockSize, boxSize)
    wl = calcWlBox(img, blockSize, boxSize)
    sigma = 5
    # apply the Gabor filter five times in succession
    for _ in range(5):
        img = _pre.GaborFilterBox(img, blockSize, boxSize, wl, np.pi/2 - theta, sigma)
    img = np.asarray(img)
    imgfore = cv2.erode(imgfore, np.ones((8, 8)), iterations=4)
    img[np.where(imgfore == 0)] = 255
    img = basic.truncate(img, method='default')
    return img, imgfore
def _do_filter(self, frame):
    ''' Process a single frame. '''
    # blur to reduce noise
    frame = cv2.GaussianBlur(frame, (5, 5), 0, borderType=cv2.BORDER_CONSTANT)

    # threshold to find contiguous regions of "bright" pixels
    # ignore all "dark" (< 1/8 max) pixels
    max = numpy.max(frame)
    min = numpy.min(frame)
    # if the frame is completely dark, then just return it
    if max == min:
        return frame
    threshold = min + (max - min) / 8
    _, frame = cv2.threshold(frame, threshold, 255, cv2.THRESH_BINARY)

    # filter out single pixels and other noise
    frame = cv2.erode(frame, self._element_shrink)

    # restore and join nearby regions (in case one fish has a skinny middle...)
    frame = cv2.dilate(frame, self._element_grow)

    return frame
def __init__(self):
    super(TargetFilterBGSub, self).__init__()

    # background subtractor
    #self._bgs = cv2.BackgroundSubtractorMOG()
    #self._bgs = cv2.BackgroundSubtractorMOG2()  # not great defaults, and need bShadowDetection to be False
    #self._bgs = cv2.BackgroundSubtractorMOG(history=10, nmixtures=3, backgroundRatio=0.2, noiseSigma=20)

    # varThreshold: higher values detect fewer/smaller changed regions
    self._bgs = cv2.createBackgroundSubtractorMOG2(history=0, varThreshold=8, detectShadows=False)

    # ??? history is ignored? Only if learning_rate is > 0, or...? Unclear.

    # Learning rate for background subtractor.
    # 0 = never adapts after initial background creation.
    # A bit above 0 looks good.
    # Lower values are better for detecting slower movement, though it
    # takes a bit of time to learn the background initially.
    self._learning_rate = 0.001

    # elements to reuse in erode/dilate
    # CROSS eliminates more horizontal/vertical lines and leaves more
    # blobs with extent in both axes [than RECT].
    self._element_shrink = cv2.getStructuringElement(cv2.MORPH_CROSS, (5, 5))
    self._element_grow = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
def checkAvailability(sift, tkp, tdes, matchimg):
    """
    :param sift: SIFT feature object
    :param tkp: template keypoints
    :param tdes: template descriptor
    :param matchimg: path of the image to match against the template
    :return: True if enough good matches are found, else False
    """
    qimg = cv2.imread(matchimg)
    qimggray = cv2.cvtColor(qimg, cv2.COLOR_BGR2GRAY)
    # kernel = np.ones((5,5), np.uint8)
    # qimggray = cv2.erode(qimggray, kernel, iterations=1)
    # ret, threshimg = cv2.threshold(qimggray, 100, 255, cv2.THRESH_BINARY)
    qkp, qdes = sift.detectAndCompute(qimggray, None)
    # plt.imshow(threshimg, 'gray'), plt.show()

    FLANN_INDEX_KDITREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDITREE, tree=5)
    # FLANN_INDEX_LSH = 6
    # index_params = dict(algorithm=FLANN_INDEX_LSH,
    #                     table_number=12,  # 12
    #                     key_size=20,  # 20
    #                     multi_probe_level=2)  # 2
    search_params = dict(checks=50)
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(tdes, qdes, k=2)
    goodMatch = []
    for m_n in matches:
        if len(m_n) != 2:
            continue
        m, n = m_n
        if m.distance < 0.75 * n.distance:
            goodMatch.append(m)
    MIN_MATCH_COUNT = 30
    if len(goodMatch) >= MIN_MATCH_COUNT:
        tp = []
        qp = []
        for m in goodMatch:
            tp.append(tkp[m.queryIdx].pt)
            qp.append(qkp[m.trainIdx].pt)
        tp, qp = np.float32((tp, qp))
        H, status = cv2.findHomography(tp, qp, cv2.RANSAC, 3.0)
        # `timg` (the template image) is expected to be defined in the enclosing scope
        h = timg.shape[0]
        w = timg.shape[1]
        trainBorder = np.float32([[[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]])
        queryBorder = cv2.perspectiveTransform(trainBorder, H)
        cv2.polylines(qimg, [np.int32(queryBorder)], True, (0, 255, 0), 5)
        cv2.imshow('result', qimg)
        plt.imshow(qimg, 'gray'), plt.show()
        return True
    else:
        print("Not Enough match found- %d/%d" % (len(goodMatch), MIN_MATCH_COUNT))
        return False
    # cv2.imshow('result', qimg)
    # if cv2.waitKey(10) == ord('q'):
    #     cv2.destroyAllWindows()
def process(img):
    img = cv2.medianBlur(img, 5)
    kernel = np.ones((3, 3), np.uint8)
    #img = cv2.erode(img, kernel, iterations=1)
    sobel = cv2.Sobel(img, cv2.CV_8U, 1, 0, ksize=3)
    element1 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 1))
    element2 = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
    dilation = cv2.dilate(sobel, element2, iterations=1)
    erosion = cv2.erode(dilation, element1, iterations=1)
    dilation2 = cv2.dilate(erosion, element2, iterations=3)
    #img = cv2.dilate(img, kernel, iterations=1)
    #img = cv2.Canny(img, 100, 200)
    return dilation2
def logoDetect(img, imgo):
    '''Locate and extract the logo in the region above the license plate.'''
    imglogo = imgo.copy()
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img = cv2.resize(img, (2*img.shape[1], 2*img.shape[0]), interpolation=cv2.INTER_CUBIC)
    #img = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 11, -3)
    ret, img = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    #img = cv2.Sobel(img, cv2.CV_8U, 1, 0, ksize=9)
    img = cv2.Canny(img, 100, 200)
    element1 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    element2 = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
    img = cv2.dilate(img, element2, iterations=1)
    img = cv2.erode(img, element1, iterations=3)
    img = cv2.dilate(img, element2, iterations=3)

    # find contours and keep the largest one with a plausible aspect ratio
    im2, contours, hierarchy = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    tema = 0
    result = []
    for con in contours:
        x, y, w, h = cv2.boundingRect(con)
        area = w * h
        ratio = max(w / h, h / w)
        if area > 300 and area < 20000 and ratio < 2:
            if area > tema:
                tema = area
                result = [x, y, w, h]
                ratio2 = ratio
    # map the detected box back to the original image coordinates;
    # `plate` is the license-plate rectangle found earlier in the pipeline
    logo2_X = [int(result[0]/2 + plate[0] - 3), int(result[0]/2 + plate[0] + result[2]/2 + 3)]
    logo2_Y = [int(result[1]/2 + max(0, plate[1] - plate[3]*3.0) - 3),
               int(result[1]/2 + max(0, plate[1] - plate[3]*3.0) + result[3]/2) + 3]
    cv2.rectangle(img, (result[0], result[1]), (result[0]+result[2], result[1]+result[3]), (255, 0, 0), 2)
    cv2.rectangle(imgo, (logo2_X[0], logo2_Y[0]), (logo2_X[1], logo2_Y[1]), (0, 0, 255), 2)
    print(tema, ratio2, result)
    logo2 = imglogo[logo2_Y[0]:logo2_Y[1], logo2_X[0]:logo2_X[1]]
    cv2.imwrite('./logo2.jpg', logo2)
    return img
def hsvModer(self, index, hsv_valueT, hsv_value_B):
    img_BGR = self.img[index]
    img_RGB = cv2.cvtColor(img_BGR, cv2.COLOR_BGR2RGB)
    img_HSV = cv2.cvtColor(img_BGR, cv2.COLOR_BGR2HSV)

    lower_red = np.array(hsv_value_B)
    upper_red = np.array(hsv_valueT)

    mask = cv2.inRange(img_HSV, lower_red, upper_red)
    res = cv2.bitwise_and(img_RGB, img_RGB, mask=mask)
    if self.erosion:
        kernel = np.ones((5, 5), np.uint8)
        res = cv2.erode(res, kernel, iterations=1)
    if self.dilate:
        kernel = np.ones((9, 9), np.uint8)
        res = cv2.dilate(res, kernel, iterations=1)
    return res
def find_lines(img, acc_threshold=0.25, should_erode=True):
    if len(img.shape) == 3 and img.shape[2] == 3:  # if it's color
        img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)

    img = cv2.GaussianBlur(img, (11, 11), 0)
    img = cv2.adaptiveThreshold(
        img, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 5, 2)
    img = cv2.bitwise_not(img)

    # thresh = 127
    # edges = cv2.threshold(img, thresh, 255, cv2.THRESH_BINARY)[1]
    # edges = cv2.Canny(blur, 500, 500, apertureSize=3)

    if should_erode:
        element = cv2.getStructuringElement(cv2.MORPH_RECT, (4, 4))
        img = cv2.erode(img, element)

    theta = np.pi/2000
    angle_threshold = 2
    horizontal = cv2.HoughLines(
        img, 1, theta, int(acc_threshold * img.shape[1]),
        min_theta=np.radians(90 - angle_threshold),
        max_theta=np.radians(90 + angle_threshold))
    vertical = cv2.HoughLines(
        img, 1, theta, int(acc_threshold * img.shape[0]),
        min_theta=np.radians(-angle_threshold),
        max_theta=np.radians(angle_threshold))

    horizontal = list(horizontal) if horizontal is not None else []
    vertical = list(vertical) if vertical is not None else []
    horizontal = [line[0] for line in horizontal]
    vertical = [line[0] for line in vertical]
    horizontal = np.asarray(horizontal)
    vertical = np.asarray(vertical)
    return horizontal, vertical
def postprocess_colormap(cls, postprocess=True):
    """Create a colormap out of the classes and postprocess the face."""
    batch = vs.apply_colormap(cls, vmin=0, vmax=21, cmap=CMAP)
    cmap = vs.apply_colormap(np.array(range(22), dtype='uint8'),
                             vmin=0, vmax=21, cmap=CMAP)
    COLSET = cmap[18:22]
    FCOL = cmap[11]
    if postprocess:
        kernel = np.ones((2, 2), dtype=np.uint8)
        for im in batch:
            for col in COLSET:
                # Extract the map of the matching color.
                colmap = np.all(im == col, axis=2).astype(np.uint8)
                # Erode.
                while np.sum(colmap) > 10:
                    colmap = cv2.erode(colmap, kernel)
                # Prepare the original map for remapping.
                im[np.all(im == col, axis=2)] = FCOL
                # Backproject.
                im[colmap == 1] = col
    return batch[:, :, :, :3]
def img_pre_treatment(file_path):
    im = cv2.imread(file_path)
    resize_pic = cv2.resize(im, (640, 480), interpolation=cv2.INTER_CUBIC)
    resize_pic = cv2.GaussianBlur(resize_pic, (5, 5), 0)
    cv2.imwrite('static/InterceptedIMG/resize.jpg', resize_pic)
    kernel = np.ones((3, 3), np.uint8)
    resize_pic = cv2.erode(resize_pic, kernel, iterations=3)
    resize_pic = cv2.dilate(resize_pic, kernel, iterations=3)
    cv2.imshow('image', resize_pic)
    k = cv2.waitKey(0) & 0xFF
    if k == 27:
        cv2.destroyAllWindows()
    gray = cv2.cvtColor(resize_pic, cv2.COLOR_BGR2GRAY)
    ret, binary = cv2.threshold(gray, 90, 255, cv2.THRESH_BINARY)
    cv2.imshow('image', binary)
    k = cv2.waitKey(0) & 0xFF
    if k == 27:
        cv2.destroyAllWindows()
    return resize_pic, binary
def contrast_image(image, thresh1=180, thresh2=200, show=False):
    image = imutils.resize(image, height=scale_factor)
    # convert it to grayscale, and blur it slightly
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray2 = cv2.GaussianBlur(gray, (5, 5), 0)
    # threshold the image, then perform a series of erosions +
    # dilations to remove any small regions of noise
    thresh = cv2.threshold(gray2, thresh1, thresh2, cv2.THRESH_BINARY)[1]
    thresh2 = cv2.erode(thresh, None, iterations=2)
    thresh3 = cv2.dilate(thresh2, None, iterations=2)
    if show is True:  # this is for debugging purposes
        cv2.imshow("Contrast", thresh3)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
    return thresh
def process_letter(thresh, output):
    # assign the kernel size
    kernel = np.ones((2, 1), np.uint8)  # vertical
    # use closing morph operation then erode to narrow the image
    temp_img = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel, iterations=3)
    # temp_img = cv2.erode(thresh,kernel,iterations=2)
    letter_img = cv2.erode(temp_img, kernel, iterations=1)
    # find contours
    (contours, _) = cv2.findContours(letter_img.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # loop over all the contour areas
    for cnt in contours:
        x, y, w, h = cv2.boundingRect(cnt)
        cv2.rectangle(output, (x-1, y-5), (x+w, y+h), (0, 255, 0), 1)
    return output

# processing letter by letter boxing
def process_word(thresh, output):
    # assign 2 rectangle kernels: one vertical and the other horizontal
    kernel = np.ones((2, 1), np.uint8)
    kernel2 = np.ones((1, 4), np.uint8)
    # use the closing morph operation with fewer iterations than for letters,
    # then dilate horizontally to join characters into words
    temp_img = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel, iterations=2)
    #temp_img = cv2.erode(thresh,kernel,iterations=2)
    word_img = cv2.dilate(temp_img, kernel2, iterations=1)
    (contours, _) = cv2.findContours(word_img.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        x, y, w, h = cv2.boundingRect(cnt)
        cv2.rectangle(output, (x-1, y-5), (x+w, y+h), (0, 255, 0), 1)
    return output

# processing line by line boxing
def process_line(thresh, output):
    # assign 2 rectangle kernels: one horizontal and one slightly taller
    kernel = np.ones((1, 5), np.uint8)
    kernel2 = np.ones((2, 4), np.uint8)
    # use the closing morph operation with fewer iterations than for letters,
    # then dilate horizontally to join words into lines
    temp_img = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel2, iterations=2)
    #temp_img = cv2.erode(thresh,kernel,iterations=2)
    line_img = cv2.dilate(temp_img, kernel, iterations=5)
    (contours, _) = cv2.findContours(line_img.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        x, y, w, h = cv2.boundingRect(cnt)
        cv2.rectangle(output, (x-1, y-5), (x+w, y+h), (0, 255, 0), 1)
    return output

# processing paragraph by paragraph boxing
def process_image(image):
    """
    Args:
        image: The image to process

    Returns:
        sub_image: The rotated and extracted image.
    """
    # Convert image to black and white - we cannot take the photos in black and white as we
    # must first search for the red triangle.
    if len(image.shape) == 3:
        processed_img = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    else:
        processed_img = image

    if config.real_hardware:
        num_iterations = 8
    else:
        num_iterations = 8

    processed_img = cv2.GaussianBlur(processed_img, (21, 21), 0)
    _, processed_img = cv2.threshold(processed_img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

    # Put a border around the image to stop the edges of the images creating artifacts.
    padded_image = np.zeros((processed_img.shape[0] + 10, processed_img.shape[1] + 10), np.uint8)
    padded_image[5:processed_img.shape[0]+5, 5:processed_img.shape[1]+5] = processed_img

    kernel = np.array([[0, 1, 1, 1, 0],
                       [1, 1, 1, 1, 1],
                       [1, 1, 1, 1, 1],
                       [1, 1, 1, 1, 1],
                       [0, 1, 1, 1, 0]], np.uint8)
    padded_image = cv2.erode(padded_image, kernel, iterations=num_iterations)
    processed_img = padded_image[25:padded_image.shape[0] - 25, 25:padded_image.shape[1] - 25]

    #cv2.imshow('Padded Image', padded_image)
    #cv2.imshow('Processed image', processed_img)
    #cv2.waitKey(0)

    # Debugging code - useful to show the images are being eroded correctly.
    #spacer = processed_img[:, 0:2].copy()
    #spacer.fill(100)
    #combined_image = np.concatenate((processed_img, spacer), axis=1)
    #combined_image = np.concatenate((combined_image, image), axis=1)
    #cv2.imshow('PreProcessed and Processed Image', combined_image)
    #cv2.waitKey(0)

    # Save sub_image to debug folder if required.
    if __debug__:
        iadebug.save_processed_image(processed_img)
    return processed_img
def detect_shirt(self):
    #self.dst = cv2.inRange(self.norm_rgb, np.array([self.lb, self.lg, self.lr], np.uint8), np.array([self.b, self.g, self.r], np.uint8))
    self.dst = cv2.inRange(self.norm_rgb, np.array([20, 20, 20], np.uint8), np.array([255, 110, 80], np.uint8))
    cv2.threshold(self.dst, 0, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY)
    fg = cv2.erode(self.dst, None, iterations=2)
    #cv2.imshow("fore", fg)
    bg = cv2.dilate(self.dst, None, iterations=3)
    _, bg = cv2.threshold(bg, 1, 128, 1)
    #cv2.imshow("back", bg)

    mark = cv2.add(fg, bg)
    mark32 = np.int32(mark)
    cv2.watershed(self.norm_rgb, mark32)
    self.m = cv2.convertScaleAbs(mark32)
    _, self.m = cv2.threshold(self.m, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    #cv2.imshow("final_tshirt", self.m)

    cntr, h = cv2.findContours(self.m, cv2.cv.CV_RETR_EXTERNAL, cv2.cv.CV_CHAIN_APPROX_SIMPLE)
    return self.m, cntr
def movement(mat_1, mat_2):
    mat_1_gray = cv2.cvtColor(mat_1.copy(), cv2.COLOR_BGR2GRAY)
    mat_1_gray = cv2.blur(mat_1_gray, (blur1, blur1))
    _, mat_1_gray = cv2.threshold(mat_1_gray, 100, 255, 0)
    mat_2_gray = cv2.cvtColor(mat_2.copy(), cv2.COLOR_BGR2GRAY)
    mat_2_gray = cv2.blur(mat_2_gray, (blur1, blur1))
    _, mat_2_gray = cv2.threshold(mat_2_gray, 100, 255, 0)
    mat_2_gray = cv2.bitwise_xor(mat_1_gray, mat_2_gray)
    mat_2_gray = cv2.blur(mat_2_gray, (blur2, blur2))
    _, mat_2_gray = cv2.threshold(mat_2_gray, 70, 255, 0)
    mat_2_gray = cv2.erode(mat_2_gray, np.ones((erodeval, erodeval)))
    mat_2_gray = cv2.dilate(mat_2_gray, np.ones((4, 4)))
    _, contours, __ = cv2.findContours(mat_2_gray, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    if len(contours) > 0:
        return True   # if there were any movements
    return False      # if not

# Pedestrian Recognition Thread
def get_init_process_img(roi_img):
    """
    Pre-process the region of interest: combine horizontal and vertical
    Sobel gradients, threshold, clean up with erosion/dilation, then run Canny.
    :param roi_img: ndarray
    :return: ndarray
    """
    h = cv2.Sobel(roi_img, cv2.CV_32F, 0, 1, -1)
    v = cv2.Sobel(roi_img, cv2.CV_32F, 1, 0, -1)
    img = cv2.add(h, v)
    img = cv2.convertScaleAbs(img)
    img = cv2.GaussianBlur(img, (3, 3), 0)
    ret, img = cv2.threshold(img, 120, 255, cv2.THRESH_BINARY)
    kernel = np.ones((1, 1), np.uint8)
    img = cv2.erode(img, kernel, iterations=1)
    img = cv2.dilate(img, kernel, iterations=2)
    img = cv2.erode(img, kernel, iterations=1)
    img = cv2.dilate(img, kernel, iterations=2)
    img = auto_canny(img)
    return img
def erode(im, iterations=1):
    return cv2.erode(im, None, iterations=iterations)
def erode_dilate(im, iterations=1):
    return dilate(erode(im, iterations=iterations), iterations)
def dilate_erode(im, iterations=1):
    return erode(dilate(im, iterations=iterations), iterations)
def execute_Morphing(proxy, obj):
    try:
        img = obj.sourceObject.Proxy.img.copy()
    except:
        img = cv2.imread(__dir__ + '/icons/freek.png')
    ks = obj.kernel
    kernel = np.ones((ks, ks), np.uint8)
    if obj.filter == 'dilation':
        dilation = cv2.dilate(img, kernel, iterations=1)
        img = dilation
    if obj.filter == 'erosion':
        dilation = cv2.erode(img, kernel, iterations=1)
        img = dilation
    if obj.filter == 'opening':
        dilation = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)
        img = dilation
    if obj.filter == 'closing':
        dilation = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)
        img = dilation
    obj.Proxy.img = img


#
# property functions for HoughLines
#
def checkForSkin(IMG10):
    high, widt = IMG10.shape[:2]
    B1 = np.reshape(np.float32(IMG10[:, :, 0]), high*widt)  # B
    G1 = np.reshape(np.float32(IMG10[:, :, 1]), high*widt)  # G
    R1 = np.reshape(np.float32(IMG10[:, :, 2]), high*widt)  # R
    #print(high, widt)
    h3 = np.zeros((high, widt, 3), np.uint8)
    #cv2.imshow("onetime", h)

    # classic RGB skin-color rule:
    # R > 95, G > 40, B > 20, max-min > 15, |R-G| > 15, R > G, R > B
    tem = np.logical_and(
        np.logical_and(
            np.logical_and(
                np.logical_and(R1 > 95, G1 > 40),
                np.logical_and(B1 > 20, (np.maximum(np.maximum(R1, G1), B1) -
                                         np.minimum(np.minimum(R1, G1), B1)) > 15)),
            R1 > B1),
        np.logical_and(np.absolute(R1 - G1) > 15, R1 > G1))
    h5 = np.array(tem).astype(np.uint8, order='C', casting='unsafe')
    h5 = np.reshape(h5, (high, widt))
    h3[:, :, 0] = h5
    h3[:, :, 1] = h5
    h3[:, :, 2] = h5
    #cv2.imshow("thirdtime", h3)
    kernel1 = np.ones((3, 3), np.uint8)
    closedH3 = np.copy(h3)
    for i in range(5):
        closedH3 = cv2.erode(closedH3, kernel1)
    for i in range(5):
        closedH3 = cv2.dilate(closedH3, kernel1)
    #cv2.imshow("closedH3", closedH3)
    #closedH3 = cv2.cvtColor(closedH3, cv2.COLOR_BGR2RGB)
    return closedH3
def find(self, image):
    hsv_frame = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv_frame, self.__hsv_bounds[0], self.__hsv_bounds[1])
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)
    contours = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
    if len(contours) == 0:
        return (False, False)
    largest_contour = max(contours, key=cv2.contourArea)
    ((x, y), radius) = cv2.minEnclosingCircle(largest_contour)
    M = cv2.moments(largest_contour)
    center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
    return (center, radius)
def convert_to_linedrawing(self, luminous_image_data):
    kernel = numpy.ones((3, 3), numpy.uint8)
    linedrawing = cv2.Canny(luminous_image_data, 5, 125)
    linedrawing = cv2.bitwise_not(linedrawing)
    linedrawing = cv2.erode(linedrawing, kernel, iterations=1)
    linedrawing = cv2.dilate(linedrawing, kernel, iterations=1)
    return linedrawing
def morphology(msk):
    assert isinstance(msk, numpy.ndarray), 'msk must be a numpy array'
    assert msk.ndim == 2, 'msk must be a greyscale image'
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
    msk = cv2.erode(msk, kernel, iterations=1)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    msk = cv2.morphologyEx(msk, cv2.MORPH_CLOSE, kernel)
    msk[msk < 128] = 0
    msk[msk > 127] = 255
    return msk
def build_mask(self, image):
    """ Build the mask to find the path edges """
    kernel = np.ones((3, 3), np.uint8)
    img = cv2.bilateralFilter(image, 9, 75, 75)
    img = cv2.erode(img, kernel, iterations=1)
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, self.lower_gray, self.upper_gray)
    mask2 = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
    mask2 = cv2.erode(mask2, kernel)
    mask2 = cv2.dilate(mask2, kernel, iterations=1)
    return mask2
def __cv_erode(src, kernel, anchor, iterations, border_type, border_value):
    """Expands areas of lower value in an image.
    Args:
        src: A numpy.ndarray.
        kernel: The kernel for erosion. A numpy.ndarray.
        anchor: The anchor point within the kernel.
        iterations: The number of times to erode.
        border_type: OpenCV enum that represents a border type.
        border_value: Value to be used for a constant border.
    Returns:
        A numpy.ndarray after erosion.
    """
    return cv2.erode(src, kernel, anchor,
                     iterations=int(iterations + 0.5),
                     borderType=border_type, borderValue=border_value)
def extract_bv(image):
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    contrast_enhanced_green_fundus = clahe.apply(image)

    # applying alternate sequential filtering (3 times closing opening)
    r1 = cv2.morphologyEx(contrast_enhanced_green_fundus, cv2.MORPH_OPEN,
                          cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5)), iterations=1)
    R1 = cv2.morphologyEx(r1, cv2.MORPH_CLOSE,
                          cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5)), iterations=1)
    r2 = cv2.morphologyEx(R1, cv2.MORPH_OPEN,
                          cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11)), iterations=1)
    R2 = cv2.morphologyEx(r2, cv2.MORPH_CLOSE,
                          cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11)), iterations=1)
    r3 = cv2.morphologyEx(R2, cv2.MORPH_OPEN,
                          cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (23, 23)), iterations=1)
    R3 = cv2.morphologyEx(r3, cv2.MORPH_CLOSE,
                          cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (23, 23)), iterations=1)
    f4 = cv2.subtract(R3, contrast_enhanced_green_fundus)
    f5 = clahe.apply(f4)

    # removing very small contours through the area parameter (noise removal)
    ret, f6 = cv2.threshold(f5, 15, 255, cv2.THRESH_BINARY)
    mask = np.ones(f5.shape[:2], dtype="uint8") * 255
    im2, contours, hierarchy = cv2.findContours(f6.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        if cv2.contourArea(cnt) <= 200:
            cv2.drawContours(mask, [cnt], -1, 0, -1)
    im = cv2.bitwise_and(f5, f5, mask=mask)
    ret, fin = cv2.threshold(im, 15, 255, cv2.THRESH_BINARY_INV)
    newfin = cv2.erode(fin, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)), iterations=1)

    # removing blobs of microaneurysms & unwanted bigger chunks, on the grounds that
    # they are not straight lines like blood vessels and fall within a certain area interval
    fundus_eroded = cv2.bitwise_not(newfin)
    xmask = np.ones(image.shape[:2], dtype="uint8") * 255
    x1, xcontours, xhierarchy = cv2.findContours(fundus_eroded.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in xcontours:
        shape = "unidentified"
        peri = cv2.arcLength(cnt, True)
        approx = cv2.approxPolyDP(cnt, 0.04 * peri, False)
        if len(approx) > 4 and cv2.contourArea(cnt) <= 3000 and cv2.contourArea(cnt) >= 100:
            shape = "circle"
        else:
            shape = "veins"
        if shape == "circle":
            cv2.drawContours(xmask, [cnt], -1, 0, -1)

    finimage = cv2.bitwise_and(fundus_eroded, fundus_eroded, mask=xmask)
    blood_vessels = cv2.bitwise_not(finimage)
    dilated = cv2.erode(blood_vessels, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7)), iterations=1)
    #dilated1 = cv2.dilate(blood_vessels, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)), iterations=1)
    blood_vessels_1 = cv2.bitwise_not(dilated)
    return blood_vessels_1
def skin_filter(cfg, vd):
    df = pd.read_csv(vd.photo_csv, index_col=0)
    numbers = df.number.tolist()
    notface = []
    for number in numbers:
        lower = np.array([0, 48, 80], dtype="uint8")
        upper = np.array([13, 255, 255], dtype="uint8")
        image = cv2.imread('%s/%d.png' % (vd.photo_dir, number), cv2.IMREAD_COLOR)
        converted = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        skinMask = cv2.inRange(converted, lower, upper)

        # apply a series of erosions and dilations to the mask
        # using an elliptical kernel
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11))
        skinMask = cv2.erode(skinMask, kernel, iterations=2)
        skinMask = cv2.dilate(skinMask, kernel, iterations=2)

        # blur the mask to help remove noise, then apply the
        # mask to the frame
        skinMask = cv2.GaussianBlur(skinMask, (3, 3), 0)
        skin = cv2.bitwise_and(image, image, mask=skinMask)
        if len(skin.nonzero()[0]) < cfg.min_skin_pixels:
            notface.append(number)
    print('%d/%d are faces' % (len(df) - len(notface), len(df)))
    df['face'] = 1
    df.loc[df.number.isin(notface), 'face'] = -99
    df.to_csv(vd.photo_csv)
def makeMask(hsv_frame, color_Range):
    mask = cv2.inRange(hsv_frame, color_Range[0], color_Range[1])
    # Morphosis next ...
    eroded = cv2.erode(mask, kernel, iterations=1)
    dilated = cv2.dilate(eroded, kernel, iterations=1)
    return dilated

# Contours on the mask are detected. Only those lying in the previously set area
# range are filtered out, and the centroid of the largest of these is drawn and returned
def calibrateColor(color, def_range):
    global kernel
    name = 'Calibrate ' + color
    cv2.namedWindow(name)
    cv2.createTrackbar('Hue', name, 0, 180, nothing)
    cv2.createTrackbar('Sat', name, 0, 255, nothing)
    cv2.createTrackbar('Val', name, 0, 255, nothing)
    while True:
        ret, frameinv = cap.read()
        frame = cv2.flip(frameinv, 1)
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        hue = cv2.getTrackbarPos('Hue', name)
        sat = cv2.getTrackbarPos('Sat', name)
        val = cv2.getTrackbarPos('Val', name)
        lower = np.array([hue-20, sat, val])
        upper = np.array([hue+20, 255, 255])
        mask = cv2.inRange(hsv, lower, upper)
        eroded = cv2.erode(mask, kernel, iterations=1)
        dilated = cv2.dilate(eroded, kernel, iterations=1)
        cv2.imshow(name, dilated)
        k = cv2.waitKey(5) & 0xFF
        if k == ord(' '):
            cv2.destroyWindow(name)
            return np.array([[hue-20, sat, val], [hue+20, 255, 255]])
        elif k == ord('d'):
            cv2.destroyWindow(name)
            return def_range