The following code examples, extracted from open-source Python projects, illustrate how to use cv2.blur().
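Before the project examples, here is a minimal usage sketch of the basic call; the file name 'input.jpg' and the 5x5 kernel size are illustrative choices, not taken from any of the projects below.

import cv2

# Minimal sketch: apply a normalized box (average) filter.
img = cv2.imread('input.jpg')          # BGR image; returns None if the file is missing
if img is not None:
    # each output pixel is the mean of the 5x5 neighborhood around the input pixel
    blurred = cv2.blur(img, (5, 5))
    cv2.imwrite('blurred.jpg', blurred)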
def canny(im, blur=3):
    im_blur = cv2.blur(im, (blur, blur))
    # pass the aperture size explicitly; the bare positional argument in the
    # original would be interpreted as the optional output array
    return cv2.Canny(im_blur, 50, 150, apertureSize=blur)
def canny(img, lowThreshold):
    """
    Performs canny edge detection on the provided grayscale image.
    :param img: a grayscale image
    :param lowThreshold: threshold for the canny operation
    :return: binary image containing the edges found by canny
    """
    dst = np.zeros(img.shape, dtype=img.dtype)
    cv2.blur(img, (3, 3), dst)
    # canny recommends that the high threshold be 3 times the low threshold
    # the kernel size is 3 as defined above
    return cv2.Canny(dst, lowThreshold, lowThreshold * 3, dst, 3)
def LinearMotionBlur3C(img):
    """Performs motion blur on an image with 3 channels. Used to simulate
    blurring caused due to motion of camera.
    Args:
        img(NumPy Array): Input image with 3 channels
    Returns:
        Image: Blurred image by applying a motion blur with random parameters
    """
    lineLengths = [3, 5, 7, 9]
    lineTypes = ["right", "left", "full"]
    lineLengthIdx = np.random.randint(0, len(lineLengths))
    lineTypeIdx = np.random.randint(0, len(lineTypes))
    lineLength = lineLengths[lineLengthIdx]
    lineType = lineTypes[lineTypeIdx]
    lineAngle = randomAngle(lineLength)
    blurred_img = img
    for i in range(3):
        blurred_img[:, :, i] = PIL2array1C(LinearMotionBlur(img[:, :, i], lineLength, lineAngle, lineType))
    blurred_img = Image.fromarray(blurred_img, 'RGB')
    return blurred_img
def computeWeightsLocallyNormalized(I, centered_gradient=True, norm_radius=45):
    h, w = I.shape[:2]
    if centered_gradient:
        gy, gx = np.gradient(I)[:2]
        gysq = (gy**2).mean(axis=2) if gy.ndim > 2 else gy**2
        gxsq = (gx**2).mean(axis=2) if gx.ndim > 2 else gx**2

        gxsq_local_mean = cv2.blur(gxsq, ksize=(norm_radius, norm_radius))
        gysq_local_mean = cv2.blur(gysq, ksize=(norm_radius, norm_radius))

        w_horizontal = np.exp(-gxsq * 1.0 / (2 * np.maximum(1e-6, gxsq_local_mean)))
        w_vertical = np.exp(-gysq * 1.0 / (2 * np.maximum(1e-6, gysq_local_mean)))
    else:
        raise Exception("NotImplementedYet")
    return w_horizontal, w_vertical
def generate_defect_img(img, min_num, max_num, label_img):
    # label_img = np.zeros_like(img)
    # if random.random > 0.9:
    #     generate_crack(img, label_img, 1, (0.01, 0.05), 6, (0.1, 0.8))
    # method_list = [blur, scratch, spot]
    method_list = [blur, scratch, spot]
    num = random.randint(min_num, max_num)
    print(num)
    for i in range(num):
        fun_index = random.randint(0, len(method_list) - 1)
        method_list[fun_index](img, label_img)
    # generate_blur(img, 1, (0.05, 0.3), (0.05, 0.3))
    # generate_scratch(img, 1, (0.001, 0.05), 20, (0.01, 0.4))
    # generate_spot(img, 1, (0.001, 0.008), 1.5)
    # return label_img
def get_bounding_rect(cap, win_cap, win, upper, lower):
    msk = cv2.dilate(cv2.erode(cv2.inRange(cv2.blur(cv2.cvtColor(cap, cv2.COLOR_BGR2HSV), (5, 5)),
                                           np.array(lower), np.array(upper)),
                               None, iterations=3),
                     None, iterations=3)
    im2, contours, hierarchy = cv2.findContours(msk, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    if len(contours) > 0:
        areas = [cv2.contourArea(c) for c in contours]  # get the area of each contour
        max_index = np.argmax(areas)  # get the index of the largest contour by area
        cnts = contours[max_index]    # get the largest contour by area
        cv2.drawContours(msk, [cnts], 0, (0, 255, 0), 3)  # draw the contours to the mask image
        x, y, w, h = cv2.boundingRect(cnts)  # get the bounding box information about the contour
        cv2.rectangle(win_cap, (x, y), (x + w, y + h), (255, 255, 255), 2)  # draw a rectangle to represent the bounding box
        cv2.imshow("debug.", win_cap)
        try:
            self.smt_dash.putNumber('vis_x', x)
            self.smt_dash.putNumber('vis_y', y)
            self.smt_dash.putNumber('vis_w', w)
            self.smt_dash.putNumber('vis_h', h)
        except Exception:
            pass
def weightedLoss(y_true, y_pred):
    # compute weights
    # a = cv2.blur(y_true, (11, 11))
    # ind = (a > 0.01) * (a < 0.99)
    # ind = ind.astype(np.float32)
    # weights = np.ones(a.shape)
    a = K.pool2d(y_true, (11, 11), strides=(1, 1), padding='same', data_format=None, pool_mode='avg')
    ind = K.cast(K.greater(a, 0.01), dtype='float32') * K.cast(K.less(a, 0.99), dtype='float32')
    weights = K.cast(K.greater_equal(a, 0), dtype='float32')
    w0 = K.sum(weights)
    # w0 = weights.sum()
    weights = weights + ind * 2
    w1 = K.sum(weights)
    # w1 = weights.sum()
    weights = weights / w1 * w0
    return weightedBCELoss2d(y_true, y_pred, weights) + weightedSoftDiceLoss(y_true, y_pred, weights)
def postprocess_masks(masks, new_size=None):
    if new_size is not None:
        masks_p = np.ndarray((masks.shape[0], masks.shape[1]) + new_size, dtype=np.float32)
        for i in range(masks.shape[0]):
            masks_p[i, 0] = cv2.resize(masks[i, 0], (new_size[1], new_size[0]), interpolation=cv2.INTER_LINEAR)
    else:
        masks_p = masks.copy()
    masks_p[np.where(np.sum(np.sum(masks_p, axis=-1), axis=-1)[:, 0] < 4000)] = 0
    for i in range(masks.shape[0]):
        masks_p[i, 0] = cv2.blur(masks_p[i, 0], (30, 30))
    masks_p = np.round(masks_p)
    for i in range(masks.shape[0]):
        blurred = cv2.blur(masks_p[i, 0], (100, 100))
        masks_p[(i, 0) + np.where(blurred < 0.1)] = 0
    masks_p[np.where(np.sum(np.sum(masks_p, axis=-1), axis=-1)[:, 0] < 1500)] = 0
    return masks_p.astype(np.uint8)
def __blur(src, type, radius):
    """Softens an image using one of several filters.
    Args:
        src: The source mat (numpy.ndarray).
        type: The blurType to perform represented as an int.
        radius: The radius for the blur as a float.
    Returns:
        A numpy.ndarray that has been blurred.
    """
    if(type is BlurType.Box_Blur):
        ksize = int(2 * round(radius) + 1)
        return cv2.blur(src, (ksize, ksize))
    elif(type is BlurType.Gaussian_Blur):
        ksize = int(6 * round(radius) + 1)
        return cv2.GaussianBlur(src, (ksize, ksize), round(radius))
    elif(type is BlurType.Median_Filter):
        ksize = int(2 * round(radius) + 1)
        return cv2.medianBlur(src, ksize)
    else:
        return cv2.bilateralFilter(src, -1, round(radius), round(radius))
def movement(mat_1, mat_2):
    mat_1_gray = cv2.cvtColor(mat_1.copy(), cv2.COLOR_BGR2GRAY)
    mat_1_gray = cv2.blur(mat_1_gray, (blur1, blur1))
    _, mat_1_gray = cv2.threshold(mat_1_gray, 100, 255, 0)
    mat_2_gray = cv2.cvtColor(mat_2.copy(), cv2.COLOR_BGR2GRAY)
    mat_2_gray = cv2.blur(mat_2_gray, (blur1, blur1))
    _, mat_2_gray = cv2.threshold(mat_2_gray, 100, 255, 0)
    mat_2_gray = cv2.bitwise_xor(mat_1_gray, mat_2_gray)
    mat_2_gray = cv2.blur(mat_2_gray, (blur2, blur2))
    _, mat_2_gray = cv2.threshold(mat_2_gray, 70, 255, 0)
    mat_2_gray = cv2.erode(mat_2_gray, np.ones((erodeval, erodeval)))
    mat_2_gray = cv2.dilate(mat_2_gray, np.ones((4, 4)))
    _, contours, __ = cv2.findContours(mat_2_gray, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    if len(contours) > 0:
        return True   # If there were any movements
    return False      # if not

# Pedestrian Recognition Thread
def partial_blur(img, points, kernel_size=9, type=1):
    """
    Partial blur within the convex hull of points.
    Args:
        type = 0 for Gaussian blur
        type = 1 for average blur
    """
    points = cv2.convexHull(points)
    copy_img = img.copy()
    black = (0, 0, 0)
    if type:
        blur_img = cv2.blur(img, (kernel_size, kernel_size))
    else:
        blur_img = cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
    cv2.fillConvexPoly(copy_img, points, color=black)
    for row in range(img.shape[:2][0]):
        for col in range(img.shape[:2][1]):
            if numpy.array_equal(copy_img[row][col], black):
                copy_img[row][col] = blur_img[row][col]
    return copy_img
def correct_colours(im1, im2, landmarks1):
    """
    Attempt to change the colouring of im2 to match that of im1. It does this by
    dividing im2 by a gaussian blur of im2, and then multiplying by a gaussian
    blur of im1.
    """
    blur_amount = COLOUR_CORRECT_BLUR_FRAC * numpy.linalg.norm(
        numpy.mean(landmarks1[LEFT_EYE_POINTS], axis=0) -
        numpy.mean(landmarks1[RIGHT_EYE_POINTS], axis=0))
    blur_amount = int(blur_amount)
    if blur_amount % 2 == 0:
        blur_amount += 1
    im1_blur = cv2.GaussianBlur(im1, (blur_amount, blur_amount), 0)
    im2_blur = cv2.GaussianBlur(im2, (blur_amount, blur_amount), 0)
    # Avoid divide-by-zero errors.
    im2_blur += (128 * (im2_blur <= 1.0)).astype(im2_blur.dtype)
    return (im2.astype(numpy.float64) * im1_blur.astype(numpy.float64) /
            im2_blur.astype(numpy.float64))
def get_frame(self):
    ret, frame = self.cap.read(self.camera_id)
    self.frame = cv2.resize(frame, None, fx=self.img_zoomx, fy=self.img_zoomy,
                            interpolation=cv2.INTER_AREA)
    self.frame = cv2.blur(self.frame, (3, 3))
    self.hsv = cv2.cvtColor(self.frame, cv2.COLOR_BGR2HSV)
    self.frame = cv2.cvtColor(self.frame, cv2.COLOR_BGR2RGB)
    self.colors = []
    if self.escaneando:
        self.draw_osd(self.frame)
    return self.frame
def test_box_filter_reflect_101(self):
    I = np.array(range(1, 50)).reshape(7, 7).astype(np.float32)
    r = 2
    ret1 = cv.smooth.box_filter(I, r, normalize=True)
    ret2 = cv2.blur(I, (5, 5), borderType=cv2.BORDER_DEFAULT)
    self.assertTrue(np.array_equal(ret1, ret2))
def test_box_filter_reflect(self):
    I = np.array(range(1, 50)).reshape(7, 7).astype(np.float32)
    r = 2
    ret1 = cv.smooth.box_filter(I, r, normalize=True, border_type='reflect')
    ret2 = cv2.blur(I, (5, 5), borderType=cv2.BORDER_REFLECT)
    self.assertTrue(np.array_equal(ret1, ret2))
def test_box_filter_edge(self):
    I = np.array(range(1, 50)).reshape(7, 7).astype(np.float32)
    r = 2
    ret1 = cv.smooth.box_filter(I, r, normalize=True, border_type='edge')
    ret2 = cv2.blur(I, (5, 5), borderType=cv2.BORDER_REPLICATE)
    self.assertTrue(np.array_equal(ret1, ret2))
def test_box_filter_zero(self):
    I = np.array(range(1, 50)).reshape(7, 7).astype(np.float32)
    r = 2
    ret1 = cv.smooth.box_filter(I, r, normalize=True, border_type='zero')
    ret2 = cv2.blur(I, (5, 5), borderType=cv2.BORDER_CONSTANT)
    self.assertTrue(np.array_equal(ret1, ret2))
def sobel(im, dx=1, dy=1, blur=3):
    if blur is None or blur == 0:
        blur_im = im
    else:
        blur_im = cv2.GaussianBlur(im, (blur, blur), 0)
    return cv2.Sobel(blur_im, cv2.CV_8U, dx, dy)
def sobel_threshold(im, dx=1, dy=1, blur=3, threshold=10):
    return (sobel(im, dx=dx, dy=dy, blur=blur) > threshold).astype(np.uint8) * 255
def alpha_image(img, points, blur=0, dilate=0):
    mask = mask_from_points(img.shape[:2], points)
    if dilate > 0:
        kernel = np.ones((dilate, dilate), np.uint8)
        mask = cv2.dilate(mask, kernel)
    if blur > 0:
        mask = cv2.blur(mask, (blur, blur))
    return np.dstack((img, mask))
def averageBlur(srcpath, dstpath):
    img = cv2.imread(srcpath, 0)   # read the image as grayscale
    blur = cv2.blur(img, (3, 5))   # average (box) blur with a 3x5 kernel
    # cv2.imwrite(dstpath, blur)
    plt.subplot(1, 2, 1), plt.imshow(img, 'gray')
    plt.subplot(1, 2, 2), plt.imshow(blur, 'gray')
    plt.show()
def gaussianBlur(srcpath, dstpath):
    img = cv2.imread(srcpath, 0)   # read the image as grayscale
    blur = cv2.GaussianBlur(img, (5, 5), 0)
    # cv2.imwrite(dstpath, blur)
    plt.subplot(1, 2, 1), plt.imshow(img, 'gray')
    plt.subplot(1, 2, 2), plt.imshow(blur, 'gray')
    plt.show()
def medianBlur(srcpath, dstpath):
    img = cv2.imread(srcpath, 0)
    blur = cv2.medianBlur(img, 3)
    # cv2.imshow(dstpath, img)
    # cv2.imwrite(dstpath, blur)
    plt.subplot(1, 2, 1), plt.imshow(img, 'gray')
    plt.subplot(1, 2, 2), plt.imshow(blur, 'gray')
    plt.show()
def bilateralFilter(srcpath, dstpath):
    img = cv2.imread(srcpath, 0)
    # 9 is the diameter of the pixel neighbourhood; the two 75 values are the
    # filter sigmas in colour space and coordinate space
    blur = cv2.bilateralFilter(img, 9, 75, 75)
    # cv2.imwrite(dstpath, blur)
    plt.subplot(1, 2, 1), plt.imshow(img, 'gray')
    plt.subplot(1, 2, 2), plt.imshow(blur, 'gray')
    plt.show()
def blur_image(image):
    if random.randint(0, 10) == 0:
        intensity = random.randint(1, 5)
        image = cv2.blur(image, (intensity, intensity))
    return image
def pretty_blur_map(blur_map, sigma=5):
    abs_image = numpy.log(numpy.abs(blur_map).astype(numpy.float32))
    cv2.blur(abs_image, (sigma, sigma))
    return cv2.medianBlur(abs_image, sigma)
def oneFileComparison(filename):
    gaussianFilterVals = list(range(1, 30, 2))
    gaussianFilterVals.insert(0, 0)
    img = _openImage(filename)
    fn = lambda x, img=img: img if x == 0 else cv2.blur(img, (x, x))
    _procedure(fn, gaussianFilterVals, gaussianFilterVals, 'artificial blur', filename)
def _computeCoefficients(self, p):
    r = self._radius
    I = self._I
    Ir, Ig, Ib = I[:, :, 0], I[:, :, 1], I[:, :, 2]

    p_mean = cv2.blur(p, (r, r))

    Ipr_mean = cv2.blur(Ir * p, (r, r))
    Ipg_mean = cv2.blur(Ig * p, (r, r))
    Ipb_mean = cv2.blur(Ib * p, (r, r))

    Ipr_cov = Ipr_mean - self._Ir_mean * p_mean
    Ipg_cov = Ipg_mean - self._Ig_mean * p_mean
    Ipb_cov = Ipb_mean - self._Ib_mean * p_mean

    ar = self._Irr_inv * Ipr_cov + self._Irg_inv * Ipg_cov + self._Irb_inv * Ipb_cov
    ag = self._Irg_inv * Ipr_cov + self._Igg_inv * Ipg_cov + self._Igb_inv * Ipb_cov
    ab = self._Irb_inv * Ipr_cov + self._Igb_inv * Ipg_cov + self._Ibb_inv * Ipb_cov

    b = p_mean - ar * self._Ir_mean - ag * self._Ig_mean - ab * self._Ib_mean

    ar_mean = cv2.blur(ar, (r, r))
    ag_mean = cv2.blur(ag, (r, r))
    ab_mean = cv2.blur(ab, (r, r))
    b_mean = cv2.blur(b, (r, r))

    return ar_mean, ag_mean, ab_mean, b_mean
def make_mask(path):
    original_name = path.split('/')[-1]
    img, points = load_image_points(path)
    if img is None:
        return None
    if not os.path.exists('eyes'):
        os.makedirs('eyes')
    if not os.path.exists('masks'):
        os.makedirs('masks')

    masked = alpha_image(img, points, 1)
    masked = fill(masked, points[LEFT_EYE_POINTS])
    masked = fill(masked, points[RIGHT_EYE_POINTS])

    mask_path = 'masks/{}.mask.png'.format(original_name)
    cv2.imwrite(mask_path, masked)

    args = ['convert', mask_path, '-trim', '+repage', '-resize', '830x830',
            '-gravity', 'center', '-background', 'transparent',
            '-extent', '850x1100', mask_path + '.tmp.png']
    subprocess.call(args)

    args = ['convert', mask_path + '.tmp.png', '-bordercolor', 'none', '-border', '2',
            '-background', 'black', '-alpha', 'background', '-channel', 'A',
            '-blur', '3x3', '-level', '0,01%', mask_path + '.tmp2.png']
    subprocess.call(args)

    args = ['convert', 'bg.png', mask_path + '.tmp2.png', '-gravity', 'center',
            '-composite', '-matte', mask_path]
    subprocess.call(args)

    os.remove(mask_path + '.tmp.png')
    os.remove(mask_path + '.tmp2.png')

    left_eye_path = 'eyes/{}.left.png'.format(original_name)
    left_eye = alpha_image(img, points[LEFT_EYE_POINTS], dilate=5, blur=1)
    cv2.imwrite(left_eye_path, left_eye)
    subprocess.call(['mogrify', '-trim', '+repage', left_eye_path])

    right_eye_path = 'eyes/{}.right.png'.format(original_name)
    right_eye = alpha_image(img, points[RIGHT_EYE_POINTS], dilate=5, blur=1)
    cv2.imwrite(right_eye_path, right_eye)
    subprocess.call(['mogrify', '-trim', '+repage', right_eye_path])
def find_robot(im):
    hsv = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)
    lower = np.array([50, 28, 0])
    upper = np.array([60, 168, 255])
    mask = cv2.inRange(hsv, lower, upper)
    result = cv2.bitwise_and(im, im, mask=mask)
    blur = cv2.blur(result, (5, 5))
    bw = cv2.cvtColor(blur, cv2.COLOR_HSV2BGR)
    bw2 = cv2.cvtColor(bw, cv2.COLOR_BGR2GRAY)
    ret, th3 = cv2.threshold(bw2, 30, 255, cv2.THRESH_BINARY)
    edges = cv2.Canny(th3, 100, 200)
    th4 = copy.copy(th3)
    perimeter = 0
    j = 0
    image, contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    cnt = np.array([])
    for i in range(len(contours)):
        if perimeter < cv2.contourArea(contours[i]):
            perimeter = cv2.contourArea(contours[i])
            j = i
            cnt = contours[j]
    x = 0
    y = 0
    for i in range(len(cnt)):
        x = x + cnt[i][0][0]
        y = y + cnt[i][0][1]
    x = x / len(cnt)
    y = y / len(cnt)
    # print x, y
    x = int(x)
    y = int(y)
    cv2.circle(im, (x, y), 5, (255, 0, 255), 2)
    # show_image(im)
    return (int(x), int(y))
def find_robot(frame):
    im = copy.copy(frame)
    hsv = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)
    lower = np.array([50, 28, 0])
    upper = np.array([60, 168, 255])
    mask = cv2.inRange(hsv, lower, upper)
    result = cv2.bitwise_and(im, im, mask=mask)
    blur = cv2.blur(result, (5, 5))
    bw = cv2.cvtColor(blur, cv2.COLOR_HSV2BGR)
    bw2 = cv2.cvtColor(bw, cv2.COLOR_BGR2GRAY)
    ret, th3 = cv2.threshold(bw2, 30, 255, cv2.THRESH_BINARY)
    edges = cv2.Canny(th3, 100, 200)
    th4 = copy.copy(th3)
    perimeter = 0
    j = 0
    image, contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    cnt = np.array([])
    for i in range(len(contours)):
        if perimeter < cv2.contourArea(contours[i]):
            perimeter = cv2.contourArea(contours[i])
            j = i
            cnt = contours[j]
    x = 0
    y = 0
    for i in range(len(cnt)):
        x = x + cnt[i][0][0]
        y = y + cnt[i][0][1]
    x = x / len(cnt)
    y = y / len(cnt)
    # print x, y
    x = int(x)
    y = int(y)
    cv2.circle(im, (x, y), 5, (255, 0, 255), 2)
    cv2.imshow('img', im)
    k = cv2.waitKey(0)
    cv2.imwrite('robot.jpg', im)
    # show_image(im)
    return (int(x), int(y))
def xmedian(ref, mwid):
    temp = np.isnan(ref)
    tmean = np.nanmean(ref)
    ref[temp] = tmean
    ref2 = cv2.blur(ref, (mwid, mwid))
    ref[temp] = ref2[temp]
    tempx = np.uint8(255 * ref)
    return cv2.medianBlur(tempx, mwid) / 255.0
def ymedian0(aero, cls, mwid):
    temp = np.isnan(aero)
    tmean = np.nanmean(aero)
    aero[temp] = tmean
    aero2 = cv2.blur(aero, (mwid, mwid))
    aero[temp] = aero2[temp]
    tempx = np.uint8(100 * aero)
    aerox = cv2.medianBlur(tempx, mwid) / 100.0
    return aerox
def ymedian(aero, cls, mwid, twid):
    temp = np.isnan(aero)
    tmean = np.nanmean(aero)
    aero[temp] = tmean
    aero2 = cv2.blur(aero, (mwid, mwid))
    aero[temp] = aero2[temp]
    # 4/28/2016
    # tempx = np.uint8(255 * aero)
    tempx = np.uint8(100 * aero)
    # aerox = cv2.medianBlur(tempx, mwid) / 255.0
    aerox = cv2.medianBlur(tempx, mwid) / 100.0
    ptemp = np.where(np.abs(aero - aerox) > twid)
    cls[ptemp] = -1
    return aerox
def binarize(self):
    # simple global-threshold variant:
    # retval2, thres = cv2.threshold(data, 50, 70, cv2.THRESH_BINARY)
    # thres = cv2.blur(thres, (50, 50))
    # adaptive-threshold variant:
    a = cv2.adaptiveThreshold(self.data, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 125, 1)
    # a = cv2.adaptiveThreshold(a, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 55, 1)
    # retval2, a = cv2.threshold(self.data, 90, 255, cv2.THRESH_BINARY)
    # a1 = np.median(a, 0)
    # plt.hist(a1, 256, range=[0, 255], fc='k', ec='k')
    # plt.show()
    self.data = a
def blur(self):
    px = 5
    self.data = cv2.blur(self.data, (px, px))
    # self.data = cv2.medianBlur(self.data, px)
def Bin(data):
    # simple global-threshold variant:
    # retval2, thres = cv2.threshold(data, 50, 70, cv2.THRESH_BINARY)
    # thres = cv2.blur(thres, (50, 50))
    # adaptive-threshold variant:
    # retval2, thres = cv2.threshold(data, 240, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    # thres = cv2.adaptiveThreshold(data, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 0)
    retval2, thres = cv2.threshold(data, 120, 127, cv2.THRESH_BINARY)
    # thres = cv2.blur(thres, (50, 50))
    return thres
def smooth_data(data, iterations=4):
    """smooth image stack
    :param data: NxMxF ndarray, where F is the number of frames and NxM are the image dimensions
    :param iterations: number of times to run the smoothing
    :return sv: smoothed data
    """
    sv = data
    for i in range(iterations):
        # the original snippet blurred an undefined name `d`; blurring the
        # corresponding frame in place is assumed here
        sv[:, :, i] = cv2.blur(sv[:, :, i], (3, 3))
    return sv
def random_blur(image, size):
    if np.random.random() < 0.5:
        image = cv2.blur(image, size)
    return image
def paintGL(self, sun_x, sun_y, sun_z, moon_x, moon_y, moon_z):
    # Draw the sun
    self.fbo.bind()
    self.draw_sun(sun_x, sun_y, sun_z)
    glFlush()
    self.fbo.release()
    image = self.fbo.toImage()

    # Produce blurred image of sun
    npimage = qimage_to_numpy(image)
    h, w, b = npimage.shape
    blur = cv2.GaussianBlur(npimage, (75, 75), 0, 0)
    cv2.convertScaleAbs(blur, blur, 2, 1)

    # Combine the blurred with the sun
    combo = cv2.addWeighted(blur, 0.5, npimage, 0.5, -1)
    h, w, b = combo.shape
    qimage = QtGui.QImage(combo.data, w, h, QtGui.QImage.Format_ARGB32).rgbSwapped()

    self.fbo.bind()
    device = QtGui.QOpenGLPaintDevice(RES_X, RES_Y)
    painter = QtGui.QPainter()
    painter.begin(device)
    rect = QtCore.QRect(0, 0, RES_X, RES_Y)
    # Draw the blurred sun/sun combo image on the screen
    painter.drawImage(rect, qimage, rect)
    painter.end()
    self.fbo.release()

    # Draw the moon
    self.fbo.bind()
    self.draw_moon(moon_x, moon_y, moon_z)
    glFlush()
    self.fbo.release()
def smooth(image, method='gaussian', kernel=(5, 5)):
    '''
    blur filter for noise removal.
    '''
    if method == 'blur':
        return cv.blur(image, kernel)
    elif method == 'gaussian':
        return cv.GaussianBlur(image, kernel, 0)
    else:
        raise ValueError('Unknown smoothing method.')
def generate_blur(img, label_img, num, ratio_h, ratio_w):
    rect_list = random_rect(num, img.shape, ratio_h, ratio_w)
    ksize = 20
    handle_img = np.copy(img)
    rect = rect_list[0]
    x, y, h, w = rect
    roi = handle_img[y:y+h, x:x+w]
    handle_img[y:y+h, x:x+w] = cv2.blur(roi, (ksize, ksize))
    pixelpoints, _ = convex_hull_generate(img, rect, 10)
    label_img[pixelpoints] = 255
    # print pixelpoints
    img[pixelpoints] = handle_img[pixelpoints]
def blur(img, label_img):
    generate_blur(img, label_img, 1, (0.05, 0.1), (0.05, 0.1))
def get_bounding_rect(self, key, cap, win_cap, win, upper, lower, return_value=False, text=True):
    hsv = cv2.cvtColor(cap, cv2.COLOR_BGR2HSV)
    hsv = cv2.blur(hsv, (5, 5))  # blur the image for smoothing
    msk = cv2.inRange(hsv, np.array(lower), np.array(upper))  # get all of the pixels with color values in the range

    # Make images smooth again!
    # msk = cv2.blur(msk, (5, 5))
    msk = cv2.erode(msk, None, iterations=3)   # erode the image to reduce background noise
    msk = cv2.dilate(msk, None, iterations=3)  # dilate the image to reduce background noise

    if self.settings["display"]:             # if the display is true,
        self.show(str(win) + " Image", msk)  # show the binary range image

    # Get the image contours in the mask
    im2, contours, hierarchy = cv2.findContours(msk, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    # If a contour was found
    if len(contours) > 0:
        areas = [cv2.contourArea(c) for c in contours]  # get the area of each contour
        max_index = np.argmax(areas)  # get the index of the largest contour by area
        cnts = contours[max_index]    # get the largest contour by area
        cv2.drawContours(msk, [cnts], 0, (0, 255, 0), 3)  # draw the contours to the mask image
        x, y, w, h = cv2.boundingRect(cnts)  # get the bounding box information about the contour
        cv2.rectangle(win_cap, (x, y), (x + w, y + h), (255, 255, 255), 2)  # draw a rectangle to represent the bounding box
        try:
            self.smt_dash.putNumber('vis_x', x)
            self.smt_dash.putNumber('vis_y', y)
            self.smt_dash.putNumber('vis_w', w)
            self.smt_dash.putNumber('vis_h', h)
        except Exception:
            pass
        if text:
            cv2.putText(win_cap, str(key), (x, y + h), cv2.FONT_HERSHEY_SIMPLEX, 2, 255)
        if return_value:         # if the function needs a return value
            return [x, y, w, h]  # return an array of the bounding box values

# Update function should be invoked whenever the camera frame needs refreshing
# Usage: self.update( )
# This should be embedded inside a while loop
def headblur(clip, fx, fy, r_zone, r_blur=None):
    """
    Returns a filter that will blur a moving part (a head?) of the frames.
    The position of the blur at time t is defined by (fx(t), fy(t)), the radius
    of the blurring by ``r_zone`` and the intensity of the blurring by ``r_blur``.
    Requires OpenCV for the circling and the blurring.
    Automatically deals with the case where part of the image goes offscreen.
    """
    if r_blur is None:
        r_blur = 2 * r_zone / 3

    def fl(gf, t):
        im = gf(t)
        h, w, d = im.shape
        x, y = int(fx(t)), int(fy(t))
        x1, x2 = max(0, x - r_zone), min(x + r_zone, w)
        y1, y2 = max(0, y - r_zone), min(y + r_zone, h)
        region_size = y2 - y1, x2 - x1

        mask = np.zeros(region_size).astype('uint8')
        cv2.circle(mask, (r_zone, r_zone), r_zone, 255, -1, lineType=cv2.CV_AA)
        mask = np.dstack(3 * [(1.0 / 255) * mask])

        orig = im[y1:y2, x1:x2]
        blurred = cv2.blur(orig, (r_blur, r_blur))
        im[y1:y2, x1:x2] = mask * blurred + (1 - mask) * orig
        return im

    return clip.fl(fl)

# ------- OVERWRITE IF REQUIREMENTS NOT MET -----------------------------
def blur(self):
    # ksize must be a (width, height) pair; the original passed a bare 9
    return cv2.blur(self.imageToProcess, (9, 9))
def toGray(image):
    return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# Simplified gaussian blur, not to be confused with cv2.blur()