The following 50 code examples, collected from open source Python projects, illustrate how to use cv2.dilate().
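Before the collected examples, here is a minimal sketch of the basic call. cv2.dilate() grows the white (non-zero) regions of an image by sliding a structuring element over it; the file names below are placeholders, not taken from any of the projects that follow.

import cv2
import numpy as np

# Load an image and binarize it (the file name is a placeholder).
img = cv2.imread('input.png', cv2.IMREAD_GRAYSCALE)
assert img is not None, 'input.png not found'
_, binary = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)

# A 3x3 rectangular structuring element; passing None to cv2.dilate()
# instead uses a default 3x3 kernel, as many examples below do.
kernel = np.ones((3, 3), np.uint8)

# Each iteration grows white regions by roughly one kernel radius.
dilated = cv2.dilate(binary, kernel, iterations=1)
cv2.imwrite('dilated.png', dilated)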
def find_squares(img):
    img = cv2.GaussianBlur(img, (5, 5), 0)
    squares = []
    for gray in cv2.split(img):
        for thrs in xrange(0, 255, 26):
            if thrs == 0:
                bin = cv2.Canny(gray, 0, 50, apertureSize=5)
                bin = cv2.dilate(bin, None)
            else:
                retval, bin = cv2.threshold(gray, thrs, 255, cv2.THRESH_BINARY)
            bin, contours, hierarchy = cv2.findContours(bin, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
            for cnt in contours:
                cnt_len = cv2.arcLength(cnt, True)
                cnt = cv2.approxPolyDP(cnt, 0.02 * cnt_len, True)
                if len(cnt) == 4 and cv2.contourArea(cnt) > 1000 and cv2.isContourConvex(cnt):
                    cnt = cnt.reshape(-1, 2)
                    max_cos = np.max([angle_cos(cnt[i], cnt[(i + 1) % 4], cnt[(i + 2) % 4])
                                      for i in xrange(4)])
                    if max_cos < 0.1:
                        squares.append(cnt)
    return squares
def _extract_spots(self) -> None:
    # Dilate and erode to 'clean' the spot (nb that this harms the number itself,
    # so we only do it to extract spots)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    img = cv2.dilate(self._img, kernel, iterations=1)
    img = cv2.erode(img, kernel, iterations=2)
    img = cv2.dilate(img, kernel, iterations=1)

    # Perform a simple blob detect
    params = cv2.SimpleBlobDetector_Params()
    params.filterByArea = True
    params.minArea = 20  # The dot in 20pt font has area of about 30
    params.filterByCircularity = True
    params.minCircularity = 0.7
    params.filterByConvexity = True
    params.minConvexity = 0.8
    params.filterByInertia = True
    params.minInertiaRatio = 0.4
    detector = cv2.SimpleBlobDetector_create(params)
    self.spot_keypoints = detector.detect(img)

    # Log intermediate image
    img_with_keypoints = cv2.drawKeypoints(img, self.spot_keypoints, outImage=np.array([]),
                                           color=(0, 0, 255),
                                           flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    self.intermediate_images.append(NamedImage(img_with_keypoints, 'Spot Detection Image'))
def apply_filters(self, image, denoise=False):
    """ This method applies the required filters to the extracted regions of
    interest. Every square in a sudoku grid is considered a region of
    interest, since it can potentially contain a value. """
    # Convert to grayscale
    source_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Denoise the grayscale image if requested in the params
    if denoise:
        denoised_gray = cv2.fastNlMeansDenoising(source_gray, None, 9, 13)
        source_blur = cv2.GaussianBlur(denoised_gray, BLUR_KERNEL_SIZE, 3)
        # source_blur = denoised_gray
    else:
        source_blur = cv2.GaussianBlur(source_gray, (3, 3), 3)
    source_thresh = cv2.adaptiveThreshold(source_blur, 255, 0, 1, 5, 2)
    kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
    source_eroded = cv2.erode(source_thresh, kernel, iterations=1)
    source_dilated = cv2.dilate(source_eroded, kernel, iterations=1)
    if ENABLE_PREVIEW_ALL:
        image_preview(source_dilated)
    return source_dilated
def find_squares(img):
    img = cv2.GaussianBlur(img, (5, 5), 0)
    squares = []
    for gray in cv2.split(img):
        for thrs in xrange(0, 255, 26):
            if thrs == 0:
                bin = cv2.Canny(gray, 0, 50, apertureSize=5)
                bin = cv2.dilate(bin, None)
            else:
                _retval, bin = cv2.threshold(gray, thrs, 255, cv2.THRESH_BINARY)
            contours, _hierarchy = find_contours(bin, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
            for cnt in contours:
                x, y, w, h = cv2.boundingRect(cnt)
                cnt_len = cv2.arcLength(cnt, True)
                cnt = cv2.approxPolyDP(cnt, 0.02 * cnt_len, True)
                area = cv2.contourArea(cnt)
                if len(cnt) == 4 and 20 < area < 1000 and cv2.isContourConvex(cnt):
                    cnt = cnt.reshape(-1, 2)
                    max_cos = np.max([angle_cos(cnt[i], cnt[(i + 1) % 4], cnt[(i + 2) % 4])
                                      for i in xrange(4)])
                    if max_cos < 0.1:
                        if (1 - (float(w) / float(h)) <= 0.07 and
                                1 - (float(h) / float(w)) <= 0.07):
                            squares.append(cnt)
    return squares
def find_squares(img, cos_limit=0.1):
    print('search for squares with threshold %f' % cos_limit)
    img = cv2.GaussianBlur(img, (5, 5), 0)
    squares = []
    for gray in cv2.split(img):
        for thrs in xrange(0, 255, 26):
            if thrs == 0:
                bin = cv2.Canny(gray, 0, 50, apertureSize=5)
                bin = cv2.dilate(bin, None)
            else:
                retval, bin = cv2.threshold(gray, thrs, 255, cv2.THRESH_BINARY)
            bin, contours, hierarchy = cv2.findContours(bin, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
            for cnt in contours:
                cnt_len = cv2.arcLength(cnt, True)
                cnt = cv2.approxPolyDP(cnt, 0.02 * cnt_len, True)
                if len(cnt) == 4 and cv2.contourArea(cnt) > 1000 and cv2.isContourConvex(cnt):
                    cnt = cnt.reshape(-1, 2)
                    max_cos = np.max([angle_cos(cnt[i], cnt[(i + 1) % 4], cnt[(i + 2) % 4])
                                      for i in xrange(4)])
                    if max_cos < cos_limit:
                        squares.append(cnt)
                    else:
                        # print('dropped a square with max_cos %f' % max_cos)
                        pass
    return squares

###
### Version V2. Collect meta-data along the way, with commentary added.
###
def add_blobs(crop_frame):
    frame = cv2.GaussianBlur(crop_frame, (3, 3), 0)
    # Convert BGR to HSV
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # define range of green color in HSV
    lower_green = np.array([70, 50, 50])
    upper_green = np.array([85, 255, 255])
    # Threshold the HSV image to keep only green colors
    mask = cv2.inRange(hsv, lower_green, upper_green)
    mask = cv2.erode(mask, None, iterations=1)
    mask = cv2.dilate(mask, None, iterations=1)
    # Bitwise-AND mask and original image
    res = cv2.bitwise_and(frame, frame, mask=mask)
    detector = cv2.SimpleBlobDetector_create(params)
    # Detect blobs on the inverted mask.
    reversemask = 255 - mask
    keypoints = detector.detect(reversemask)
    if keypoints:
        print "found blobs"
        if len(keypoints) > 4:
            keypoints.sort(key=(lambda s: s.size))
            keypoints = keypoints[0:3]
        # Draw detected blobs as red circles.
        # cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the
        # circle corresponds to the size of the blob
        im_with_keypoints = cv2.drawKeypoints(frame, keypoints, np.array([]), (0, 0, 255),
                                              cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    else:
        print "no blobs"
        im_with_keypoints = crop_frame
    return im_with_keypoints  # , max_blob_dist, blob_center, keypoint_in_orders
def read_images(filenames, domain=None, image_size=64):
    images = []
    for fn in filenames:
        image = cv2.imread(fn)
        if image is None:
            continue
        if domain == 'A':
            kernel = np.ones((3, 3), np.uint8)
            image = image[:, :256, :]
            image = 255. - image
            image = cv2.dilate(image, kernel, iterations=1)
            image = 255. - image
        elif domain == 'B':
            image = image[:, 256:, :]
        image = cv2.resize(image, (image_size, image_size))
        image = image.astype(np.float32) / 255.
        image = image.transpose(2, 0, 1)
        images.append(image)
    images = np.stack(images)
    return images
def dif_gaus(image, lower, upper):
    lower, upper = int(lower - 1), int(upper - 1)
    lower = cv2.GaussianBlur(image, ksize=(lower, lower), sigmaX=0)
    upper = cv2.GaussianBlur(image, ksize=(upper, upper), sigmaX=0)
    # upper += 50
    # lower += 50
    dif = lower - upper
    # dif *= .1
    # dif = cv2.medianBlur(dif, 3)
    # dif = 255 - dif
    dif = cv2.inRange(dif, np.asarray(200), np.asarray(256))
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
    dif = cv2.dilate(dif, kernel, iterations=2)
    dif = cv2.erode(dif, kernel, iterations=1)
    # dif = cv2.max(image, dif)
    # dif = cv2.dilate(dif, kernel, iterations=1)
    return dif
def erase_specular(image, lower_threshold=0.0, upper_threshold=150.0):
    """erase_specular: removes specular reflections
    within the given threshold using a binary mask (hi_mask)
    """
    thresh = cv2.inRange(image, np.asarray(float(lower_threshold)), np.asarray(256.0))
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
    hi_mask = cv2.dilate(thresh, kernel, iterations=2)
    specular = cv2.inpaint(image, hi_mask, 2, flags=cv2.INPAINT_TELEA)
    # return cv2.max(hi_mask, image)
    return specular
def skin_detect(self, raw_yrb, img_src):
    # use median blurring to remove signal noise in the YCrCb domain
    raw_yrb = cv2.medianBlur(raw_yrb, 5)
    mask_skin = cv2.inRange(raw_yrb, self.mask_lower_yrb, self.mask_upper_yrb)

    # morphological transform to remove unwanted parts
    kernel = np.ones((5, 5), np.uint8)
    # mask_skin = cv2.morphologyEx(mask_skin, cv2.MORPH_OPEN, kernel)
    mask_skin = cv2.dilate(mask_skin, kernel, iterations=2)

    res_skin = cv2.bitwise_and(img_src, img_src, mask=mask_skin)
    # res_skin_dn = cv2.fastNlMeansDenoisingColored(res_skin, None, 10, 10, 7, 21)
    return res_skin

# Do background subtraction with some filtering
def animpingpong(self):
    print self
    print self.Object
    print self.Object.Name
    obj = self.Object
    img = cv2.imread(obj.imageFile)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gray = np.float32(gray)

    dst = cv2.cornerHarris(gray, 3, 3, 0.00001)
    dst = cv2.dilate(dst, None)
    img[dst > 0.01 * dst.max()] = [0, 0, 255]

    from matplotlib import pyplot as plt
    plt.subplot(121), plt.imshow(img, cmap='gray')
    plt.title('Edge Image'), plt.xticks([]), plt.yticks([])
    plt.subplot(122), plt.imshow(dst, cmap='gray')
    plt.title('Corner Image'), plt.xticks([]), plt.yticks([])
    plt.show()
def cannyThresholding(self, contour_retrieval_mode=cv2.RETR_LIST):
    '''
    contour_retrieval_mode is passed through as the second argument to cv2.findContours
    '''
    # Attempt to match edges found in the blue, green or red channels: collect all
    channel = 0
    for gray in cv2.split(self.img):
        channel += 1
        print('channel %d ' % channel)
        title = self.tgen.next('channel-%d' % channel)
        if self.show: ImageViewer(gray).show(window=title, destroy=self.destroy, info=self.info, thumbnailfn=title)
        found = {}
        for thrs in xrange(0, 255, 26):
            print('Using threshold %d' % thrs)
            if thrs == 0:
                print('First step')
                bin = cv2.Canny(gray, 0, 50, apertureSize=5)
                title = self.tgen.next('canny-%d' % channel)
                if self.show: ImageViewer(bin).show(window=title, destroy=self.destroy, info=self.info, thumbnailfn=title)
                bin = cv2.dilate(bin, None)
                title = self.tgen.next('canny-dilate-%d' % channel)
                if self.show: ImageViewer(bin).show(window=title, destroy=self.destroy, info=self.info, thumbnailfn=title)
            else:
                retval, bin = cv2.threshold(gray, thrs, 255, cv2.THRESH_BINARY)
                title = self.tgen.next('channel-%d-threshold-%d' % (channel, thrs))
                if self.show: ImageViewer(bin).show(window='Next threshold (n to continue)', destroy=self.destroy, info=self.info, thumbnailfn=title)
            bin, contours, hierarchy = cv2.findContours(bin, contour_retrieval_mode, cv2.CHAIN_APPROX_SIMPLE)
            title = self.tgen.next('channel-%d-threshold-%d-contours' % (channel, thrs))
            if self.show: ImageViewer(bin).show(window=title, destroy=self.destroy, info=self.info, thumbnailfn=title)
            if contour_retrieval_mode == cv2.RETR_LIST or contour_retrieval_mode == cv2.RETR_EXTERNAL:
                filteredContours = contours
            else:
                filteredContours = []
                h = hierarchy[0]
                for component in zip(contours, h):
                    currentContour = component[0]
                    currentHierarchy = component[1]
                    if currentHierarchy[3] < 0:
                        # Found the outermost parent component
                        filteredContours.append(currentContour)
                print('Contours filtered. Input %d Output %d' % (len(contours), len(filteredContours)))
                time.sleep(5)
            for cnt in filteredContours:
                cnt_len = cv2.arcLength(cnt, True)
                cnt = cv2.approxPolyDP(cnt, 0.02 * cnt_len, True)
                cnt_len = len(cnt)
                cnt_area = cv2.contourArea(cnt)
                cnt_isConvex = cv2.isContourConvex(cnt)
                if cnt_len == 4 and (cnt_area > self.area_min and cnt_area < self.area_max) and cnt_isConvex:
                    cnt = cnt.reshape(-1, 2)
                    max_cos = np.max([angle_cos(cnt[i], cnt[(i + 1) % 4], cnt[(i + 2) % 4])
                                      for i in xrange(4)])
                    if max_cos < self.cos_limit:
                        sq = Square(cnt, cnt_area, cnt_isConvex, max_cos)
                        self.squares.append(sq)
                    else:
                        # print('dropped a square with max_cos %f' % max_cos)
                        pass
            found[thrs] = len(self.squares)
            print('Found %d quadrilaterals with threshold %d' % (len(self.squares), thrs))
def find_chars(img):
    gray = np.array(img.convert("L"))
    ret, mask = cv2.threshold(gray, 180, 255, cv2.THRESH_BINARY)
    image_final = cv2.bitwise_and(gray, gray, mask=mask)
    ret, new_img = cv2.threshold(image_final, 180, 255, cv2.THRESH_BINARY_INV)
    kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
    dilated = cv2.dilate(new_img, kernel, iterations=1)
    # Image.fromarray(dilated).save('out.png')  # for debugging
    _, contours, hierarchy = cv2.findContours(dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

    coords = []
    for contour in contours:
        # get rectangle bounding contour
        [x, y, w, h] = cv2.boundingRect(contour)
        # ignore large chars (probably not chars)
        if w > 70 and h > 70:
            continue
        coords.append((x, y, w, h))
    return coords

# find list of eye coordinates in image
def find_components(im, max_components=16):
    """Dilate the image until there are just a few connected components.
    Returns contours for these components."""
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (10, 10))
    dilation = dilate(im, kernel, 6)
    count = 21
    n = 0
    sigma = 0.000
    while count > max_components:
        n += 1
        sigma += 0.005
        result = cv2.findContours(dilation, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        if len(result) == 3:
            _, contours, hierarchy = result
        elif len(result) == 2:
            contours, hierarchy = result
        possible = find_likely_rectangles(contours, sigma)
        count = len(possible)
    return (dilation, possible, n)
def find_components(edges, max_components=16):
    """Dilate the image until there are just a few connected components.
    Returns contours for these components."""
    # Perform increasingly aggressive dilation until there are just a few
    # connected components.
    count = 21
    dilation = 5
    n = 1
    while count > max_components:
        n += 1
        dilated_image = dilate(edges, N=3, iterations=n)
        _, contours, hierarchy = cv2.findContours(dilated_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        count = len(contours)
    # print dilation
    # Image.fromarray(edges).show()
    # Image.fromarray(255 * dilated_image).show()
    return contours
def remove_ridges(image, width=6, threshold=160, dilation=1, return_mask=False):
    """Detect ridges of `width` pixels using the highest eigenvector of the
    Hessian matrix, then create a binarized mask with `threshold` and remove
    it from `image` (set to black). Default values are optimized for text
    detection and removal.

    A dilation radius in pixels can be passed in to thicken the mask prior
    to being applied."""
    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # The value of sigma is calculated according to Steger's work:
    # An Unbiased Detector of Curvilinear Structures,
    # IEEE Transactions on Pattern Analysis and Machine Intelligence,
    # Vol. 20, No. 2, Feb 1998
    # http://ieeexplore.ieee.org/document/659930/
    sigma = (width / 2) / np.sqrt(3)
    hxx, hxy, hyy = feature.hessian_matrix(gray_image, sigma=sigma, order='xy')
    large_eigenvalues, _ = feature.hessian_matrix_eigvals(hxx, hxy, hyy)
    mask = convert(large_eigenvalues)
    mask = binarize_image(mask, method='boolean', threshold=threshold)
    if dilation:
        dilation = (2 * dilation) + 1
        dilation_kernel = np.ones((dilation, dilation), np.uint8)
        mask = cv2.dilate(mask, dilation_kernel)
    return image, 255 - mask
def get_contour_portion(images, segb):
    ns = images.shape[0]
    nt = images.shape[1]
    portion = np.zeros((ns, nt))
    for s in range(ns):
        for t in range(nt):
            img = images[s, t, 0]
            seg = segb[nt * s + t, 0]
            if np.sum(seg) < 10:
                portion[s, t] = 0.0
                continue
            mask = cv2.dilate(seg, np.ones((7, 7))) - seg > 0
            z = img[mask]
            x, y = np.where(mask)
            lvinside = np.mean(img[seg > 0])
            lvoutside = np.percentile(z, 20)
            ccut = lvinside * 0.3 + lvoutside * 0.7
            cnt_sh = get_contour_shape(x, y, z)
            if cnt_sh is None:
                portion[s, t] = 0.0
            else:
                res = get_eff_portion(cnt_sh, ccut)
                portion[s, t] = res
    return portion
def _do_filter(self, frame):
    ''' Process a single frame. '''
    # blur to reduce noise
    frame = cv2.GaussianBlur(frame, (5, 5), 0, borderType=cv2.BORDER_CONSTANT)

    # threshold to find contiguous regions of "bright" pixels;
    # ignore all "dark" (< 1/8 max) pixels
    max = numpy.max(frame)
    min = numpy.min(frame)
    # if the frame is completely dark, then just return it
    if max == min:
        return frame
    threshold = min + (max - min) / 8
    _, frame = cv2.threshold(frame, threshold, 255, cv2.THRESH_BINARY)

    # filter out single pixels and other noise
    frame = cv2.erode(frame, self._element_shrink)

    # restore and join nearby regions (in case one fish has a skinny middle...)
    frame = cv2.dilate(frame, self._element_grow)

    return frame
def __init__(self):
    super(TargetFilterBGSub, self).__init__()

    # background subtractor
    #self._bgs = cv2.BackgroundSubtractorMOG()
    #self._bgs = cv2.BackgroundSubtractorMOG2()  # not great defaults, and need bShadowDetection to be False
    #self._bgs = cv2.BackgroundSubtractorMOG(history=10, nmixtures=3, backgroundRatio=0.2, noiseSigma=20)

    # varThreshold: higher values detect fewer/smaller changed regions
    self._bgs = cv2.createBackgroundSubtractorMOG2(history=0, varThreshold=8, detectShadows=False)

    # ??? history is ignored? Only if learning_rate is > 0, or...? Unclear.

    # Learning rate for background subtractor.
    # 0 = never adapts after initial background creation.
    # A bit above 0 looks good.
    # Lower values are better for detecting slower movement, though it
    # takes a bit of time to learn the background initially.
    self._learning_rate = 0.001

    # elements to reuse in erode/dilate
    # CROSS eliminates more horizontal/vertical lines and leaves more
    # blobs with extent in both axes [than RECT].
    self._element_shrink = cv2.getStructuringElement(cv2.MORPH_CROSS, (5, 5))
    self._element_grow = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
def process(img):
    img = cv2.medianBlur(img, 5)
    kernel = np.ones((3, 3), np.uint8)
    #img = cv2.erode(img, kernel, iterations=1)
    sobel = cv2.Sobel(img, cv2.CV_8U, 1, 0, ksize=3)
    element1 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 1))
    element2 = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
    dilation = cv2.dilate(sobel, element2, iterations=1)
    erosion = cv2.erode(dilation, element1, iterations=1)
    dilation2 = cv2.dilate(erosion, element2, iterations=3)
    #img = cv2.dilate(img, kernel, iterations=1)
    #img = cv2.Canny(img, 100, 200)
    return dilation2
def logoDetect(img, imgo):
    '''Locate and crop the logo region above the license plate.'''
    imglogo = imgo.copy()
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img = cv2.resize(img, (2 * img.shape[1], 2 * img.shape[0]), interpolation=cv2.INTER_CUBIC)
    #img = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 11, -3)
    ret, img = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    #img = cv2.Sobel(img, cv2.CV_8U, 1, 0, ksize=9)
    img = cv2.Canny(img, 100, 200)
    element1 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    element2 = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
    img = cv2.dilate(img, element2, iterations=1)
    img = cv2.erode(img, element1, iterations=3)
    img = cv2.dilate(img, element2, iterations=3)

    # find contours and keep the largest plausible bounding box
    im2, contours, hierarchy = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    tema = 0
    result = []
    for con in contours:
        x, y, w, h = cv2.boundingRect(con)
        area = w * h
        ratio = max(w / h, h / w)
        if area > 300 and area < 20000 and ratio < 2:
            if area > tema:
                tema = area
                result = [x, y, w, h]
                ratio2 = ratio
    # map the detected box back to the coordinates of the original image
    # (the working image was upscaled by a factor of two)
    logo2_X = [int(result[0] / 2 + plate[0] - 3), int(result[0] / 2 + plate[0] + result[2] / 2 + 3)]
    logo2_Y = [int(result[1] / 2 + max(0, plate[1] - plate[3] * 3.0) - 3), int(result[1] / 2 + max(0, plate[1] - plate[3] * 3.0) + result[3] / 2) + 3]
    cv2.rectangle(img, (result[0], result[1]), (result[0] + result[2], result[1] + result[3]), (255, 0, 0), 2)
    cv2.rectangle(imgo, (logo2_X[0], logo2_Y[0]), (logo2_X[1], logo2_Y[1]), (0, 0, 255), 2)
    print tema, ratio2, result
    logo2 = imglogo[logo2_Y[0]:logo2_Y[1], logo2_X[0]:logo2_X[1]]
    cv2.imwrite('./logo2.jpg', logo2)
    return img
def hsvModer(self, index, hsv_valueT, hsv_value_B):
    img_BGR = self.img[index]
    img_RGB = cv2.cvtColor(img_BGR, cv2.COLOR_BGR2RGB)
    img_HSV = cv2.cvtColor(img_BGR, cv2.COLOR_BGR2HSV)

    lower_red = np.array(hsv_value_B)
    upper_red = np.array(hsv_valueT)

    mask = cv2.inRange(img_HSV, lower_red, upper_red)
    res = cv2.bitwise_and(img_RGB, img_RGB, mask=mask)
    if self.erosion:
        kernel = np.ones((5, 5), np.uint8)
        res = cv2.erode(res, kernel, iterations=1)
    if self.dilate:
        kernel = np.ones((9, 9), np.uint8)
        res = cv2.dilate(res, kernel, iterations=1)
    return res
def find_components(edges, max_components=16):
    """Dilate the image until there are just a few connected components.
    Returns contours for these components."""
    # Perform increasingly aggressive dilation until there are just a few
    # connected components.
    count = 21
    dilation = 5
    n = 1
    while count > max_components:
        n += 1
        dilated_image = dilate(edges, N=3, iterations=n)
        contours, hierarchy = cv2.findContours(dilated_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        count = len(contours)
    # print dilation
    # Image.fromarray(edges).show()
    # Image.fromarray(255 * dilated_image).show()
    return contours
def get_bounding_rect(self, cap, win_cap, win, upper, lower):
    msk = cv2.dilate(
        cv2.erode(
            cv2.inRange(
                cv2.blur(cv2.cvtColor(cap, cv2.COLOR_BGR2HSV), (5, 5)),
                np.array(lower), np.array(upper)),
            None, iterations=3),
        None, iterations=3)
    im2, contours, hierarchy = cv2.findContours(msk, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    if len(contours) > 0:
        areas = [cv2.contourArea(c) for c in contours]  # get the area of each contour
        max_index = np.argmax(areas)  # get the index of the largest contour by area
        cnts = contours[max_index]  # get the largest contour by area
        cv2.drawContours(msk, [cnts], 0, (0, 255, 0), 3)  # draw the contour on the mask image
        x, y, w, h = cv2.boundingRect(cnts)  # get the bounding box information about the contour
        cv2.rectangle(win_cap, (x, y), (x + w, y + h), (255, 255, 255), 2)  # draw a rectangle on the image to represent the bounding box
        cv2.imshow("debug.", win_cap)
        try:
            self.smt_dash.putNumber('vis_x', x)
            self.smt_dash.putNumber('vis_y', y)
            self.smt_dash.putNumber('vis_w', w)
            self.smt_dash.putNumber('vis_h', h)
        except Exception:
            pass
def img_pre_treatment(file_path):
    im = cv2.imread(file_path)
    resize_pic = cv2.resize(im, (640, 480), interpolation=cv2.INTER_CUBIC)
    resize_pic = cv2.GaussianBlur(resize_pic, (5, 5), 0)
    cv2.imwrite('static/InterceptedIMG/resize.jpg', resize_pic)
    kernel = np.ones((3, 3), np.uint8)
    resize_pic = cv2.erode(resize_pic, kernel, iterations=3)
    resize_pic = cv2.dilate(resize_pic, kernel, iterations=3)
    cv2.imshow('image', resize_pic)
    k = cv2.waitKey(0) & 0xFF
    if k == 27:
        cv2.destroyAllWindows()
    gray = cv2.cvtColor(resize_pic, cv2.COLOR_BGR2GRAY)
    ret, binary = cv2.threshold(gray, 90, 255, cv2.THRESH_BINARY)
    cv2.imshow('image', binary)
    k = cv2.waitKey(0) & 0xFF
    if k == 27:
        cv2.destroyAllWindows()
    return resize_pic, binary
def contrast_image(image, thresh1=180, thresh2=200, show=False):
    image = imutils.resize(image, height=scale_factor)
    # convert it to grayscale, and blur it slightly
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray2 = cv2.GaussianBlur(gray, (5, 5), 0)

    # threshold the image, then perform a series of erosions +
    # dilations to remove any small regions of noise
    thresh = cv2.threshold(gray2, thresh1, thresh2, cv2.THRESH_BINARY)[1]
    thresh2 = cv2.erode(thresh, None, iterations=2)
    thresh3 = cv2.dilate(thresh2, None, iterations=2)

    if show is True:  # this is for debugging purposes
        cv2.imshow("Contrast", thresh3)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

    return thresh
def process_word(thresh, output):
    # assign two rectangular kernels: one vertical and the other horizontal
    kernel = np.ones((2, 1), np.uint8)
    kernel2 = np.ones((1, 4), np.uint8)

    # use the closing morph operation, but with fewer iterations than for
    # letters, then dilate horizontally to join letters into words
    temp_img = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel, iterations=2)
    #temp_img = cv2.erode(thresh, kernel, iterations=2)
    word_img = cv2.dilate(temp_img, kernel2, iterations=1)

    (contours, _) = cv2.findContours(word_img.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        x, y, w, h = cv2.boundingRect(cnt)
        cv2.rectangle(output, (x - 1, y - 5), (x + w, y + h), (0, 255, 0), 1)
    return output

# processing line by line boxing
def process_line(thresh, output):
    # assign two rectangular kernels: one horizontal and the other vertical
    kernel = np.ones((1, 5), np.uint8)
    kernel2 = np.ones((2, 4), np.uint8)

    # use the closing morph operation, but with fewer iterations than for
    # letters, then dilate horizontally to join words into lines
    temp_img = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel2, iterations=2)
    #temp_img = cv2.erode(thresh, kernel, iterations=2)
    line_img = cv2.dilate(temp_img, kernel, iterations=5)

    (contours, _) = cv2.findContours(line_img.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        x, y, w, h = cv2.boundingRect(cnt)
        cv2.rectangle(output, (x - 1, y - 5), (x + w, y + h), (0, 255, 0), 1)
    return output

# processing par by par boxing
def detect_shirt(self):
    #self.dst = cv2.inRange(self.norm_rgb, np.array([self.lb, self.lg, self.lr], np.uint8), np.array([self.b, self.g, self.r], np.uint8))
    self.dst = cv2.inRange(self.norm_rgb, np.array([20, 20, 20], np.uint8), np.array([255, 110, 80], np.uint8))
    cv2.threshold(self.dst, 0, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY)
    fg = cv2.erode(self.dst, None, iterations=2)
    #cv2.imshow("fore", fg)
    bg = cv2.dilate(self.dst, None, iterations=3)
    _, bg = cv2.threshold(bg, 1, 128, 1)
    #cv2.imshow("back", bg)

    mark = cv2.add(fg, bg)
    mark32 = np.int32(mark)
    cv2.watershed(self.norm_rgb, mark32)
    self.m = cv2.convertScaleAbs(mark32)
    _, self.m = cv2.threshold(self.m, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    #cv2.imshow("final_tshirt", self.m)

    cntr, h = cv2.findContours(self.m, cv2.cv.CV_RETR_EXTERNAL, cv2.cv.CV_CHAIN_APPROX_SIMPLE)

    return self.m, cntr
def movement(mat_1, mat_2):
    mat_1_gray = cv2.cvtColor(mat_1.copy(), cv2.COLOR_BGR2GRAY)
    mat_1_gray = cv2.blur(mat_1_gray, (blur1, blur1))
    _, mat_1_gray = cv2.threshold(mat_1_gray, 100, 255, 0)
    mat_2_gray = cv2.cvtColor(mat_2.copy(), cv2.COLOR_BGR2GRAY)
    mat_2_gray = cv2.blur(mat_2_gray, (blur1, blur1))
    _, mat_2_gray = cv2.threshold(mat_2_gray, 100, 255, 0)
    mat_2_gray = cv2.bitwise_xor(mat_1_gray, mat_2_gray)
    mat_2_gray = cv2.blur(mat_2_gray, (blur2, blur2))
    _, mat_2_gray = cv2.threshold(mat_2_gray, 70, 255, 0)
    mat_2_gray = cv2.erode(mat_2_gray, np.ones((erodeval, erodeval)))
    mat_2_gray = cv2.dilate(mat_2_gray, np.ones((4, 4)))
    _, contours, __ = cv2.findContours(mat_2_gray, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    if len(contours) > 0:
        return True  # if there were any movements
    return False  # if not

# Pedestrian Recognition Thread
def get_init_process_img(roi_img):
    """
    Preprocess the ROI image: extract gradients, threshold, then apply
    erosion/dilation cycles and Canny edge detection.
    :param roi_img: ndarray
    :return: ndarray
    """
    h = cv2.Sobel(roi_img, cv2.CV_32F, 0, 1, -1)
    v = cv2.Sobel(roi_img, cv2.CV_32F, 1, 0, -1)
    img = cv2.add(h, v)
    img = cv2.convertScaleAbs(img)
    img = cv2.GaussianBlur(img, (3, 3), 0)
    ret, img = cv2.threshold(img, 120, 255, cv2.THRESH_BINARY)
    kernel = np.ones((1, 1), np.uint8)
    img = cv2.erode(img, kernel, iterations=1)
    img = cv2.dilate(img, kernel, iterations=2)
    img = cv2.erode(img, kernel, iterations=1)
    img = cv2.dilate(img, kernel, iterations=2)
    img = auto_canny(img)
    return img
def dilate(im, iterations=1):
    return cv2.dilate(im, None, iterations=iterations)

def erode_dilate(im, iterations=1):
    return dilate(erode(im, iterations=iterations), iterations)

def dilate_erode(im, iterations=1):
    return erode(dilate(im, iterations=iterations), iterations)
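The two composed helpers above are the standard morphological operations: erode-then-dilate is opening (removes small speckles) and dilate-then-erode is closing (fills small holes). A minimal sketch of the equivalent direct cv2.morphologyEx() calls, using synthetic placeholder data rather than anything from the project above:

import cv2
import numpy as np

# A synthetic binary image with a one-pixel hole and a one-pixel speckle.
mask = np.zeros((20, 20), np.uint8)
mask[5:15, 5:15] = 255   # solid block
mask[8, 8] = 0           # hole inside the block (closing should fill it)
mask[2, 2] = 255         # isolated speckle (opening should remove it)

kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))

# erode then dilate == morphological opening
opened = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)

# dilate then erode == morphological closing
closed = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)

assert opened[2, 2] == 0     # speckle removed
assert closed[8, 8] == 255   # hole filled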
def equalize(image, image_lower=0.0, image_upper=255.0):
    image_lower = int(image_lower * 2) / 2
    image_lower += 1
    image_lower = max(3, image_lower)
    mean = cv2.medianBlur(image, 255)
    image = image - (mean - 100)
    # kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
    # cv2.dilate(image, kernel, image, iterations=1)
    return image
def background_subtract(self, img_src):
    fgmask = self.fgbg.apply(cv2.GaussianBlur(img_src, (25, 25), 0))
    kernel = np.ones((5, 5), np.uint8)
    fgmask = cv2.dilate(fgmask, kernel, iterations=2)
    #fgmask = self.fgbg.apply(cv2.medianBlur(img_src, 11))
    org_fg = cv2.bitwise_and(img_src, img_src, mask=fgmask)
    return org_fg

# Update Position of ROI
def alpha_image(img, points, blur=0, dilate=0):
    mask = mask_from_points(img.shape[:2], points)
    if dilate > 0:
        kernel = np.ones((dilate, dilate), np.uint8)
        mask = cv2.dilate(mask, kernel)
    if blur > 0:
        mask = cv2.blur(mask, (blur, blur))
    return np.dstack((img, mask))
def animpingpong(self):
    obj = self.Object
    img = None
    if not obj.imageFromNode:
        img = cv2.imread(obj.imageFile)
    else:
        img = obj.imageNode.ViewObject.Proxy.img.copy()
    print (obj.blockSize, obj.ksize, obj.k)
    try:
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        gray = np.float32(gray)
        print "normale"
    except:
        im2 = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
        gray = cv2.cvtColor(im2, cv2.COLOR_RGB2GRAY)
        print "except"

    dst = cv2.cornerHarris(gray, obj.blockSize, obj.ksize * 2 + 1, obj.k / 10000)
    dst = cv2.dilate(dst, None)
    img[dst > 0.01 * dst.max()] = [0, 0, 255]
    dst2 = img.copy()
    dst2[dst < 0.01 * dst.max()] = [255, 255, 255]
    dst2[dst > 0.01 * dst.max()] = [0, 0, 255]

    if not obj.matplotlib:
        cv2.imshow(obj.Label, img)
    else:
        from matplotlib import pyplot as plt
        plt.subplot(121), plt.imshow(img, cmap='gray')
        plt.title('Edge Image'), plt.xticks([]), plt.yticks([])
        plt.subplot(122), plt.imshow(dst2, cmap='gray')
        plt.title('Corner Image'), plt.xticks([]), plt.yticks([])
        plt.show()

    self.img = img
def execute_Morphing(proxy, obj):
    try:
        img = obj.sourceObject.Proxy.img.copy()
    except:
        img = cv2.imread(__dir__ + '/icons/freek.png')
    ks = obj.kernel
    kernel = np.ones((ks, ks), np.uint8)
    if obj.filter == 'dilation':
        dilation = cv2.dilate(img, kernel, iterations=1)
        img = dilation
    if obj.filter == 'erosion':
        dilation = cv2.erode(img, kernel, iterations=1)
        img = dilation
    if obj.filter == 'opening':
        dilation = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)
        img = dilation
    if obj.filter == 'closing':
        dilation = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)
        img = dilation
    obj.Proxy.img = img

#
# property functions for HoughLines
#
def animpingpong(self):
    obj = self.Object
    img = None
    if not obj.imageFromNode:
        img = cv2.imread(obj.imageFile)
    else:
        print "copy image ..."
        img = obj.imageNode.ViewObject.Proxy.img.copy()
        print "copied"
    print " loaded"
    print (obj.blockSize, obj.ksize, obj.k)

    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gray = np.float32(gray)

    # dst = cv2.cornerHarris(gray, 3, 3, 0.00001)
    dst = cv2.cornerHarris(gray, obj.blockSize, obj.ksize * 2 + 1, obj.k / 10000)
    dst = cv2.dilate(dst, None)
    img[dst > 0.01 * dst.max()] = [0, 0, 255]

    if True:
        print "show"
        cv2.imshow(obj.Label, img)
        print "shown"
    else:
        from matplotlib import pyplot as plt
        plt.subplot(121), plt.imshow(img, cmap='gray')
        plt.title('Edge Image'), plt.xticks([]), plt.yticks([])
        plt.subplot(122), plt.imshow(dst, cmap='gray')
        plt.title('Corner Image'), plt.xticks([]), plt.yticks([])
        plt.show()
    print "done"
    self.img = img
def checkForSkin(IMG10):
    high, widt = IMG10.shape[:2]

    B1 = np.reshape(np.float32(IMG10[:, :, 0]), high * widt)  # B
    G1 = np.reshape(np.float32(IMG10[:, :, 1]), high * widt)  # G
    R1 = np.reshape(np.float32(IMG10[:, :, 2]), high * widt)  # R

    #print high, widt
    h3 = np.zeros((high, widt, 3), np.uint8)
    #cv2.imshow("onetime", h)

    tem = np.logical_and(
        np.logical_and(
            np.logical_and(
                np.logical_and(R1 > 95, G1 > 40),
                np.logical_and(B1 > 20,
                               (np.maximum(np.maximum(R1, G1), B1) -
                                np.minimum(np.minimum(R1, G1), B1)) > 15)),
            R1 > B1),
        np.logical_and(np.absolute(R1 - G1) > 15, R1 > G1))
    h5 = np.array(tem).astype(np.uint8, order='C', casting='unsafe')
    h5 = np.reshape(h5, (high, widt))
    h3[:, :, 0] = h5
    h3[:, :, 1] = h5
    h3[:, :, 2] = h5
    #cv2.imshow("thirdtime", h3)

    kernel1 = np.ones((3, 3), np.uint8)
    closedH3 = np.copy(h3)
    for i in range(5):
        closedH3 = cv2.erode(closedH3, kernel1)
    for i in range(5):
        closedH3 = cv2.dilate(closedH3, kernel1)
    #cv2.imshow("closedH3", closedH3)
    # closedH3 = cv2.cvtColor(closedH3, cv2.COLOR_BGR2RGB)
    return closedH3
def find(self, image):
    hsv_frame = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv_frame, self.__hsv_bounds[0], self.__hsv_bounds[1])
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)

    contours = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
    if len(contours) == 0:
        return (False, False)

    largest_contour = max(contours, key=cv2.contourArea)
    ((x, y), radius) = cv2.minEnclosingCircle(largest_contour)
    M = cv2.moments(largest_contour)
    center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))

    return (center, radius)
def segment(self, im):
    mask = np.square(im.astype('float32') - self.bgim).sum(axis=2) / 20
    mask = np.clip(mask, 0, 255).astype('uint8')
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, self.kernel)
    mask = cv2.dilate(mask, self.dilate_k)
    mask = mask.astype('uint8')
    return (mask > 10).astype('float32') * 255
def convert_to_linedrawing(self, luminous_image_data):
    kernel = numpy.ones((3, 3), numpy.uint8)
    linedrawing = cv2.Canny(luminous_image_data, 5, 125)
    linedrawing = cv2.bitwise_not(linedrawing)
    linedrawing = cv2.erode(linedrawing, kernel, iterations=1)
    linedrawing = cv2.dilate(linedrawing, kernel, iterations=1)
    return linedrawing
def convert_to_linedrawing(self, luminous_image_data):
    # 5x5 kernel of ones, i.e. the 24-neighborhood of each pixel
    neighborhood24 = numpy.ones((5, 5), numpy.uint8)
    dilated = cv2.dilate(luminous_image_data, neighborhood24, iterations=1)
    diff = cv2.absdiff(dilated, luminous_image_data)
    linedrawing = cv2.bitwise_not(diff)
    return linedrawing
def morph_single(y_out):
    """Morphological transform.

    Args:
        y_out: [T, H, W]
    """
    y_out_morph = np.zeros(y_out.shape)
    kernel = np.ones([5, 5])
    for ch in xrange(y_out.shape[0]):
        y_out_morph[ch] = cv2.dilate(y_out[ch], kernel)
    return y_out_morph
def border(self, alpha, size, kernel_type='RECT'):
    """
    alpha : alpha layer of the text
    size  : size of the kernel
    kernel_type : one of [rect, ellipse, cross]

    @return : alpha layer of the border (color to be added externally).
    """
    kdict = {'RECT': cv.MORPH_RECT, 'ELLIPSE': cv.MORPH_ELLIPSE, 'CROSS': cv.MORPH_CROSS}
    kernel = cv.getStructuringElement(kdict[kernel_type], (size, size))
    border = cv.dilate(alpha, kernel, iterations=1)  # - alpha
    return border
def build_mask(self, image):
    """ Build the mask to find the path edges """
    kernel = np.ones((3, 3), np.uint8)
    img = cv2.bilateralFilter(image, 9, 75, 75)
    img = cv2.erode(img, kernel, iterations=1)
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, self.lower_gray, self.upper_gray)
    mask2 = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
    mask2 = cv2.erode(mask2, kernel)
    mask2 = cv2.dilate(mask2, kernel, iterations=1)
    return mask2