The following 46 code examples, extracted from open-source Python projects, illustrate how to use cv2.CV_64F.
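A note on the constant itself before the examples: cv2.CV_64F selects a signed 64-bit floating-point output depth. It shows up so often with derivative filters because an unsigned 8-bit destination clips negative gradients to zero, discarding one side of every edge. A minimal sketch of the difference (the toy step image is made up for illustration):

import cv2
import numpy as np

# Toy image: white on the left, black on the right (a single falling edge).
img = np.zeros((8, 8), dtype=np.uint8)
img[:, :4] = 255

sobel_u8 = cv2.Sobel(img, cv2.CV_8U, 1, 0, ksize=3)    # negative responses saturate to 0
sobel_f64 = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=3)  # negative responses preserved

print(sobel_u8.min(), sobel_u8.max())  # 0 0 -- the falling edge vanishes entirely
print(sobel_f64.min())                 # negative: the edge survives with its sign
back_to_u8 = cv2.convertScaleAbs(sobel_f64)  # common follow-up: abs(), then back to uint8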
def try_approximate_corners_blur(self, board_dims, sharpness_threshold):
    sharpness = cv2.Laplacian(self.frame, cv2.CV_64F).var()
    if sharpness < sharpness_threshold:
        return False
    found, corners = cv2.findChessboardCorners(self.frame, board_dims)
    self.current_image_points = corners
    return found
def find_bibs(image):
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    binary = cv2.GaussianBlur(gray, (5, 5), 0)
    ret, binary = cv2.threshold(binary, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    #binary = cv2.adaptiveThreshold(binary, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
    #ret, binary = cv2.threshold(binary, 190, 255, cv2.THRESH_BINARY)
    #lapl = cv2.Laplacian(image, cv2.CV_64F)
    #gray = cv2.cvtColor(lapl, cv2.COLOR_BGR2GRAY)
    #blurred = cv2.GaussianBlur(lapl, (5, 5), 0)
    #ret, binary = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    #cv2.imwrite("lapl.jpg", lapl)
    edges = cv2.Canny(image, 175, 200)
    cv2.imwrite("edges.jpg", edges)
    binary = edges
    cv2.imwrite("binary.jpg", binary)
    contours, hierarchy = find_contours(binary)
    return get_rectangles(contours)
def EdgeDetection(img):
    img = cv2.fastNlMeansDenoising(img, None, 3, 7, 21)
    _, img = cv2.threshold(img, 30, 255, cv2.THRESH_TOZERO)
    denoise_img = img
    laplacian = cv2.Laplacian(img, cv2.CV_64F)
    sobelx = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=5)  # x
    sobely = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=3)  # y
    canny = cv2.Canny(img, 100, 200)
    # OpenCV 3.x findContours returns three values; OpenCV 4.x drops the first.
    contour_image, contours, hierarchy = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    return {"denoise": denoise_img, "laplacian": laplacian, "canny": canny,
            "sobely": sobely, "sobelx": sobelx, "contour": contour_image}

# GrayScale Image Convertor
# https://extr3metech.wordpress.com
def MyDenoiseSobely(path):
    img_gray = ToGrayImage(path)
    img_mydenoise = MyDenoise(img_gray, 5)
    img_denoise = cv2.fastNlMeansDenoising(img_mydenoise, None, 3, 7, 21)
    _, img_thre = cv2.threshold(img_denoise, 100, 255, cv2.THRESH_TOZERO)
    sobely = cv2.Sobel(img_thre, cv2.CV_64F, 0, 1, ksize=3)
    return sobely
def dir_threshold(img, sobel_kernel=3, thresh=(0, np.pi/2)):
    # Apply the following steps to img
    # 1) Convert to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # 2) Take the gradient in x and y separately
    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    # 3) Take the absolute value of the x and y gradients
    abs_sobelx = np.absolute(sobelx)
    abs_sobely = np.absolute(sobely)
    # 4) Use np.arctan2(abs_sobely, abs_sobelx) to calculate the direction of the gradient
    absgraddir = np.arctan2(abs_sobely, abs_sobelx)
    # 5) Create a binary mask where direction thresholds are met
    binary_output = np.zeros_like(absgraddir)
    binary_output[(absgraddir >= thresh[0]) & (absgraddir <= thresh[1])] = 1
    # 6) Return this mask as your binary_output image
    return binary_output

# Define a function that applies Sobel x and y,
# then computes the magnitude of the gradient
# and applies a threshold
def mag_thresh(img, sobel_kernel=3, mag_thresh=(0, 255)):
    # Apply the following steps to img
    # 1) Convert to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # 2) Take the gradient in x and y separately
    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    # 3) Calculate the magnitude
    gradmag = np.sqrt(sobelx**2 + sobely**2)
    # 4) Scale to 8-bit (0 - 255) and convert to type = np.uint8
    scale_factor = np.max(gradmag) / 255
    gradmag = (gradmag / scale_factor).astype(np.uint8)
    # 5) Create a binary mask where mag thresholds are met
    binary_output = np.zeros_like(gradmag)
    binary_output[(gradmag >= mag_thresh[0]) & (gradmag <= mag_thresh[1])] = 1
    # 6) Return this mask as your binary_output image
    return binary_output

# Define a function that applies Sobel x or y,
# then takes an absolute value and applies a threshold.
# Note: calling your function with orient='x', thresh_min=5, thresh_max=100
# should produce output like the example image shown above this quiz.
def abs_sobel_thresh(img, orient='x', thresh_min=0, thresh_max=255):
    # Apply the following steps to img
    # 1) Convert to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # 2) Take the derivative in x or y given orient = 'x' or 'y'
    if orient == 'x':
        sobel = cv2.Sobel(gray, cv2.CV_64F, 1, 0)
    if orient == 'y':
        sobel = cv2.Sobel(gray, cv2.CV_64F, 0, 1)
    # 3) Take the absolute value of the derivative or gradient
    abs_sobel = np.absolute(sobel)
    # 4) Scale to 8-bit (0 - 255) then convert to type = np.uint8
    scaled_sobel = np.uint8(255 * abs_sobel / np.max(abs_sobel))
    # 5) Create a mask of 1's where the scaled gradient magnitude
    #    is > thresh_min and < thresh_max
    binary_output = np.zeros_like(scaled_sobel)
    binary_output[(scaled_sobel >= thresh_min) & (scaled_sobel <= thresh_max)] = 1
    # 6) Return this mask as your binary_output image
    return binary_output
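The three helpers above (dir_threshold, mag_thresh, abs_sobel_thresh) are typically combined into a single binary mask. A hedged sketch of such a combination; 'road.jpg' and the threshold values are placeholders, not taken from any of the source projects:

import cv2
import numpy as np

# 'road.jpg' is a hypothetical input image.
test_img = cv2.cvtColor(cv2.imread('road.jpg'), cv2.COLOR_BGR2RGB)

gradx = abs_sobel_thresh(test_img, orient='x', thresh_min=20, thresh_max=100)
mag_binary = mag_thresh(test_img, sobel_kernel=3, mag_thresh=(30, 100))
dir_binary = dir_threshold(test_img, sobel_kernel=15, thresh=(0.7, 1.3))

# Keep pixels that pass the x-gradient test, or both the magnitude and direction tests.
combined = np.zeros_like(gradx)
combined[(gradx == 1) | ((mag_binary == 1) & (dir_binary == 1))] = 1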
def compute_inital_corner_likelihood(image):
    likelihoods = []
    for prototype in ck.CORNER_KERNEL_PROTOTYPES:
        filter_responses = [cv2.filter2D(image, ddepth=cv2.CV_64F, kernel=kernel)
                            for kernel in prototype]
        fA, fB, fC, fD = filter_responses
        mean_response = (fA + fB + fC + fD) / 4.
        minAB = np.minimum(fA, fB)
        minCD = np.minimum(fC, fD)
        diff1 = minAB - mean_response
        diff2 = minCD - mean_response
        # For an ideal corner, the response of {A,B} should be greater than the
        # mean response of {A,B,C,D}, while the response of {C,D} should be
        # smaller, and vice versa for flipped corners.
        likelihood1 = np.minimum(diff1, -diff2)
        likelihood2 = np.minimum(-diff1, diff2)  # flipped case
        likelihoods.append(likelihood1)
        likelihoods.append(likelihood2)
    corner_likelihood = np.max(likelihoods, axis=0)
    return corner_likelihood
def compute_grad(self):
    """precompute gradient's magnitude and angle of pyramid, where angle is in (0, 2π)"""
    for oct_ind, layer_ind, layer in self.enumerate():
        # todo: better kernel can be used?
        grad_x = cv2.filter2D(layer, cv2.CV_64F, np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]]))
        grad_y = cv2.filter2D(layer, cv2.CV_64F, np.array([[-1, -2, -1], [0, 0, 0], [1, 2, 1]]))
        grad_mag = np.sqrt(grad_x**2 + grad_y**2)
        grad_ang = np.arctan2(grad_y, grad_x)  # each element in (-π, π)
        grad_ang %= TAU  # (-π, 0) is mapped to (π, 2π)
        self._grad_mag[oct_ind][layer_ind] = grad_mag
        self._grad_ang[oct_ind][layer_ind] = grad_ang
def _filter(img, method, k):
    if method == 'Edge gradient':
        sy = cv2.Sobel(img, ddepth=cv2.CV_64F, dx=0, dy=1, ksize=k)
        sx = cv2.Sobel(img, ddepth=cv2.CV_64F, dx=1, dy=0, ksize=k)
        # sx = sobel(img, axis=0, mode='constant')
        # sy = sobel(img, axis=1, mode='constant')
        return np.hypot(sx, sy)
    if method == 'Sobel-H':
        return cv2.Sobel(img, ddepth=cv2.CV_64F, dx=0, dy=1, ksize=k)  # sobel(img, axis=0, mode='constant')
    if method == 'Sobel-V':
        return cv2.Sobel(img, ddepth=cv2.CV_64F, dx=1, dy=0, ksize=k)  # sobel(img, axis=1, mode='constant')
    if method == 'Laplace':
        return cv2.Laplacian(img, ddepth=cv2.CV_64F, ksize=5)  # laplace(img)
def getEdges(gray, detector, min_thr=None, max_thr=None):
    """
    Where detector in {1, 2, 3, 4, 5}
    1: Laplacian
    2: Sobel x
    3: Sobel y
    4: Canny
    5: Sobel x with positive and negative slope (in 2, negative slopes are lost)
    """
    if min_thr is None:
        min_thr = 100
        max_thr = 200
    if detector == 1:
        return cv2.Laplacian(gray, cv2.CV_64F)
    elif detector == 2:
        return cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=-1)
    elif detector == 3:
        return cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=-1)
    elif detector == 4:
        # Canny(min_thresh, max_thresh): the thresholds apply not to the intensity
        # but to the intensity gradient (how different a pixel is from its neighbors).
        return cv2.Canny(gray, min_thr, max_thr)
    elif detector == 5:
        sobelx64f = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=5)
        abs_sobel64f = np.absolute(sobelx64f)
        return np.uint8(abs_sobel64f)
def Guidedfilter(im, p, r, eps):
    mean_I = cv2.boxFilter(im, cv2.CV_64F, (r, r))
    mean_p = cv2.boxFilter(p, cv2.CV_64F, (r, r))
    mean_Ip = cv2.boxFilter(im * p, cv2.CV_64F, (r, r))
    cov_Ip = mean_Ip - mean_I * mean_p
    mean_II = cv2.boxFilter(im * im, cv2.CV_64F, (r, r))
    var_I = mean_II - mean_I * mean_I
    a = cov_Ip / (var_I + eps)
    b = mean_p - a * mean_I
    mean_a = cv2.boxFilter(a, cv2.CV_64F, (r, r))
    mean_b = cv2.boxFilter(b, cv2.CV_64F, (r, r))
    q = mean_a * im + mean_b
    return q
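Guidedfilter above is the standard guided filter (He et al.), with cv2.CV_64F keeping every boxFilter mean in double precision. Filtering an image with itself as the guide gives an edge-preserving smoother; a hedged usage sketch, where the file names and the r/eps values are illustrative only:

import cv2

# 'input.png' is a placeholder; guided filtering expects float input, here in [0, 1].
gray = cv2.imread('input.png', cv2.IMREAD_GRAYSCALE).astype('float64') / 255.0
smoothed = Guidedfilter(gray, gray, 8, 1e-3)  # guide == input: edge-preserving blur
cv2.imwrite('smoothed.png', (smoothed * 255).clip(0, 255).astype('uint8'))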
def binary_extraction(self, image, ksize=3):
    # undistort first
    #image = self.undistort(image)
    color_bin = self.color_thresh(image, thresh=(90, 150))  # initial values 110, 255
    gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    # pass ksize by keyword: the fifth positional argument of cv2.Sobel is dst, not ksize
    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=ksize)
    sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=ksize)
    gradx = self.abs_sobel_thresh(sobelx, thresh=(100, 190))  # initial values 40, 160
    grady = self.abs_sobel_thresh(sobely, thresh=(100, 190))  # initial values 40, 160
    mag_binary = self.mag_thresh(sobelx, sobely, mag_thresh=(100, 190))  # initial values 40, 160
    #dir_binary = self.dir_threshold(sobelx, sobely, thresh=(0.7, 1.3))
    combined = np.zeros_like(gradx)
    #combined[(((gradx == 1) & (grady == 1)) | ((mag_binary == 1) & (dir_binary == 1))) | (color_bin == 1)] = 1
    combined[(((gradx == 1) & (grady == 1)) | (mag_binary == 1)) | (color_bin == 1)] = 1
    #combined[(((gradx == 1) & (grady == 1)) | (mag_binary == 1))] = 1
    return combined

# transform perspective
def pipeline(img, s_thresh=(170, 255), sx_thresh=(20, 100)):
    img = np.copy(img)
    # Convert to HLS color space and separate the L and S channels
    hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS).astype(np.float64)
    l_channel = hls[:, :, 1]
    s_channel = hls[:, :, 2]
    # Sobel x
    sobelx = cv2.Sobel(l_channel, cv2.CV_64F, 1, 0)  # Take the derivative in x
    abs_sobelx = np.absolute(sobelx)  # Absolute x derivative to accentuate lines away from horizontal
    scaled_sobel = np.uint8(255 * abs_sobelx / np.max(abs_sobelx))
    # Threshold x gradient
    sxbinary = np.zeros_like(scaled_sobel)
    sxbinary[(scaled_sobel >= sx_thresh[0]) & (scaled_sobel <= sx_thresh[1])] = 1
    # Threshold color channel
    s_binary = np.zeros_like(s_channel)
    s_binary[(s_channel >= s_thresh[0]) & (s_channel <= s_thresh[1])] = 1
    # Stack each channel
    # Note color_binary[:, :, 0] is all 0s, effectively an all black image. It might
    # be beneficial to replace this channel with something else.
    color_binary = np.dstack((np.zeros_like(sxbinary), sxbinary, s_binary))
    return color_binary
def abs_sobel_thresh(img, orient='x', thresh_min=0, thresh_max=255):
    # Convert to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # Apply x or y gradient with the OpenCV Sobel() function
    # and take the absolute value
    if orient == 'x':
        abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 1, 0))
    if orient == 'y':
        abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 0, 1))
    # Rescale back to 8 bit integer
    scaled_sobel = np.uint8(255 * abs_sobel / np.max(abs_sobel))
    # Create a copy and apply the threshold
    binary_output = np.zeros_like(scaled_sobel)
    # Here I'm using inclusive (>=, <=) thresholds, but exclusive is ok too
    binary_output[(scaled_sobel >= thresh_min) & (scaled_sobel <= thresh_max)] = 1
    # Return the result
    return binary_output
def variance_of_laplacian(im):
    """
    Compute the Laplacian of the image and then return the focus measure,
    which is simply the variance of the Laplacian.
    http://www.pyimagesearch.com/2015/09/07/blur-detection-with-opencv/
    """
    return cv2.Laplacian(im, cv2.CV_64F).var()
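Scores from variance_of_laplacian are only meaningful relative to a threshold tuned per camera and scene. A minimal sketch, assuming a hypothetical 'photo.jpg' and the rule-of-thumb threshold of 100 used in the linked article:

import cv2

gray = cv2.imread('photo.jpg', cv2.IMREAD_GRAYSCALE)  # 'photo.jpg' is a placeholder
score = variance_of_laplacian(gray)
print('blurry' if score < 100.0 else 'sharp', score)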
def im_normalize(im, lo=0, hi=255, dtype='uint8'):
    # dst=None is required positionally by the cv2.normalize binding;
    # OpenCV allocates the output array itself.
    return cv2.normalize(im, None, alpha=lo, beta=hi,
                         norm_type=cv2.NORM_MINMAX,
                         dtype={'uint8': cv2.CV_8U,
                                'float32': cv2.CV_32F,
                                'float64': cv2.CV_64F}[dtype])
def read_array(filename):
    with open(filename, 'rb') as fp:
        # np.frombuffer replaces the deprecated np.fromstring for binary data
        type_code = np.frombuffer(fp.read(4), dtype=np.int32)[0]
        shape_size = np.frombuffer(fp.read(4), dtype=np.int32)[0]
        shape = np.frombuffer(fp.read(4 * shape_size), dtype=np.int32)
        if type_code == cv2.CV_32F:
            dtype = np.float32
        if type_code == cv2.CV_64F:
            dtype = np.float64
        return np.frombuffer(fp.read(), dtype=dtype).reshape(shape)
def write_array(filename, array):
    with open(filename, 'wb') as fp:
        if array.dtype == np.float32:
            typecode = cv2.CV_32F
        elif array.dtype == np.float64:
            typecode = cv2.CV_64F
        else:
            raise ValueError("type is not supported")
        # tobytes() replaces the deprecated tostring()
        fp.write(np.array(typecode, dtype=np.int32).tobytes())
        fp.write(np.array(len(array.shape), dtype=np.int32).tobytes())
        fp.write(np.array(array.shape, dtype=np.int32).tobytes())
        fp.write(array.tobytes())
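read_array and write_array form a small serialization pair in which the OpenCV depth constants (CV_32F = 5, CV_64F = 6) double as dtype tags in the header. A hedged round-trip sketch; the path is arbitrary:

import numpy as np

a = np.random.rand(4, 5).astype(np.float64)
write_array('/tmp/arr.bin', a)   # '/tmp/arr.bin' is an arbitrary scratch path
b = read_array('/tmp/arr.bin')
assert b.shape == a.shape and np.allclose(a, b)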
def _create_derivative(cls, img):
    edges = cv2.Canny(img, 175, 320, apertureSize=3)
    # Create gradient map using Sobel
    sobelx64f = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=-1)
    sobely64f = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=-1)
    theta = np.arctan2(sobely64f, sobelx64f)
    if diagnostics:
        cv2.imwrite('edges.jpg', edges)
        cv2.imwrite('sobelx64f.jpg', np.absolute(sobelx64f))
        cv2.imwrite('sobely64f.jpg', np.absolute(sobely64f))
        # amplify theta for visual inspection
        theta_visible = (theta + np.pi) * 255 / (2 * np.pi)
        cv2.imwrite('theta.jpg', theta_visible)
    return (edges, sobelx64f, sobely64f, theta)
def get_blur(frame, scale):
    frame = cv2.resize(frame, None, fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    fm = cv2.Laplacian(gray, cv2.CV_64F).var()
    return fm
def estimate_blur(image, threshold=100):
    if image.ndim == 3:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    blur_map = cv2.Laplacian(image, cv2.CV_64F)
    score = numpy.var(blur_map)
    return blur_map, score, bool(score < threshold)
def modifiedLaplacian(img):
    """'LAPM' algorithm (Nayar89)"""
    M = np.array([-1, 2, -1])
    G = cv2.getGaussianKernel(ksize=3, sigma=-1)
    Lx = cv2.sepFilter2D(src=img, ddepth=cv2.CV_64F, kernelX=M, kernelY=G)
    Ly = cv2.sepFilter2D(src=img, ddepth=cv2.CV_64F, kernelX=G, kernelY=M)
    FM = np.abs(Lx) + np.abs(Ly)
    return cv2.mean(FM)[0]
def varianceOfLaplacian(img):
    """'LAPV' algorithm (Pech2000)"""
    lap = cv2.Laplacian(img, ddepth=-1)  # ddepth=-1 keeps the source depth; cv2.CV_64F would preserve negative responses
    stdev = cv2.meanStdDev(lap)[1]
    s = stdev[0] ** 2
    return s[0]
def tenengrad(img, ksize=3):
    """'TENG' algorithm (Krotkov86)"""
    Gx = cv2.Sobel(img, ddepth=cv2.CV_64F, dx=1, dy=0, ksize=ksize)
    Gy = cv2.Sobel(img, ddepth=cv2.CV_64F, dx=0, dy=1, ksize=ksize)
    FM = Gx * Gx + Gy * Gy
    mn = cv2.mean(FM)[0]
    if np.isnan(mn):
        return np.nanmean(FM)
    return mn
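The three focus measures above (LAPM, LAPV, TENG) all grow with sharpness, but their scales differ, so scores are only comparable within a single measure. A hedged comparison sketch; 'frame.png' is a placeholder:

import cv2

img = cv2.imread('frame.png', cv2.IMREAD_GRAYSCALE)  # 'frame.png' is a placeholder
for name, fn in [('LAPM', modifiedLaplacian), ('LAPV', varianceOfLaplacian), ('TENG', tenengrad)]:
    print(name, fn(img))  # higher = sharper, but not comparable across measures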
def seg(path):
    p = np.array([get_3d_data('../../../Cut_Brats_Training_Data/Test/' + "cut" + path + "_flair.nii.gz")])
    shap = p[0].shape
    print(shap)
    leng = shap[0] * shap[1] * shap[2]
    #pix = get_pixels(path)
    pc = concat(p)
    print(p[0].shape)
    px = cv2.Sobel(p[0], cv2.CV_64F, 1, 0, ksize=5)
    py = cv2.Sobel(p[0], cv2.CV_64F, 0, 1, ksize=5)
    print(time.strftime('%a %H:%M:%S'))
    pcx = concat1(px)
    pcy = concat1(py)
    print(time.strftime('%a %H:%M:%S'))
    pa = ndimage.filters.convolve(p[0], np.full((5, 5, 5), 1.0 / 125), mode='constant')
    print(time.strftime('%a %H:%M:%S'))
    pg = concat1(pa)
    print(time.strftime('%a %H:%M:%S'))
    X = reshape_feat(pc, pg, pcx, pcy, leng)
    print(time.strftime('%a %H:%M:%S'))
    return X
def seg(path):
    p = np.array([get_3d_data('../../../Cut_Brats_Training_Data/Train/' + "cut" + path + "_flair.nii.gz")])
    y = np.array([get_3d_data('../../../Cut_Brats_Training_Data/Train/' + "cut" + path[4:] + "_seg.nii.gz")])
    shap = p[0].shape
    print(shap)
    leng = shap[0] * shap[1] * shap[2]
    #pix = get_pixels(path)
    pc = concat(p)
    yc = concat(y)
    print(p[0].shape)
    px = cv2.Sobel(p[0], cv2.CV_64F, 1, 0, ksize=5)
    py = cv2.Sobel(p[0], cv2.CV_64F, 0, 1, ksize=5)
    print(time.strftime('%a %H:%M:%S'))
    pcx = concat1(px)
    pcy = concat1(py)
    print(time.strftime('%a %H:%M:%S'))
    pa = ndimage.filters.convolve(p[0], np.full((5, 5, 5), 1.0 / 125), mode='constant')
    print(time.strftime('%a %H:%M:%S'))
    pg = concat1(pa)
    print(time.strftime('%a %H:%M:%S'))
    X = reshape_feat(pc, pg, pcx, pcy, leng)
    Y = reshape_seg(yc, leng)
    print(time.strftime('%a %H:%M:%S'))
    return X, Y
def _blur_index(self, img):
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    return cv2.Laplacian(img_gray, cv2.CV_64F).var()
def match_template_mask(image, template, mask=None, method=None, sigma=0.33):
    """Match template against image, applying mask to template using method.

    Method can be one of (None, 'laplacian', 'sobel', 'scharr', 'prewitt',
    'roberts', 'canny'). Returns locations to look for max values."""
    if mask is not None:
        if method:
            kernel = np.ones((3, 3), np.uint8)
            mask = cv2.erode(mask, kernel)
            if method == 'laplacian':
                # use CV_64F to not lose edges, convert to uint8 afterwards
                edge_image = np.uint8(np.absolute(cv2.Laplacian(image, cv2.CV_64F)))
                edge_template = np.uint8(np.absolute(cv2.Laplacian(template, cv2.CV_64F)))
            elif method in ('sobel', 'scharr', 'prewitt', 'roberts'):
                filter_func = getattr(skfilters, method)
                edge_image = filter_func(image)
                edge_template = filter_func(template)
                edge_image = convert(edge_image)
                edge_template = convert(edge_template)
            else:  # method == 'canny'
                values = np.hstack([image.ravel(), template.ravel()])
                median = np.median(values)
                lower = int(max(0, (1.0 - sigma) * median))
                upper = int(min(255, (1.0 + sigma) * median))
                edge_image = cv2.Canny(image, lower, upper)
                edge_template = cv2.Canny(template, lower, upper)
            results = cv2.matchTemplate(edge_image, edge_template & mask,
                                        cv2.TM_CCOEFF_NORMED)
        else:
            # mask must be passed by keyword: the fourth positional
            # argument of cv2.matchTemplate is the result array
            results = cv2.matchTemplate(image, template, cv2.TM_CCOEFF_NORMED,
                                        mask=mask)
    else:
        results = cv2.matchTemplate(image, template, cv2.TM_CCOEFF_NORMED)
    return results
def HLS_sobel(img, s_thresh=(120, 255), sx_thresh=(20, 255), l_thresh=(40, 255)):
    img = np.copy(img)
    # Convert to HLS color space and separate the L and S channels
    hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS).astype(np.float64)
    #h_channel = hls[:, :, 0]
    l_channel = hls[:, :, 1]
    s_channel = hls[:, :, 2]
    # Sobel x
    # sobelx = abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh=(0, 255))
    # l_channel_col = np.dstack((l_channel, l_channel, l_channel))
    sobelx = cv2.Sobel(l_channel, cv2.CV_64F, 1, 0)  # Take the derivative in x
    abs_sobelx = np.absolute(sobelx)  # Absolute x derivative to accentuate lines away from horizontal
    scaled_sobel = np.uint8(255 * abs_sobelx / np.max(abs_sobelx))
    # Threshold x gradient
    sxbinary = np.zeros_like(scaled_sobel)
    sxbinary[(scaled_sobel >= sx_thresh[0]) & (scaled_sobel <= sx_thresh[1])] = 1
    # Threshold saturation channel
    s_binary = np.zeros_like(s_channel)
    s_binary[(s_channel >= s_thresh[0]) & (s_channel <= s_thresh[1])] = 1
    # Threshold lightness
    l_binary = np.zeros_like(l_channel)
    l_binary[(l_channel >= l_thresh[0]) & (l_channel <= l_thresh[1])] = 1
    channels = 255 * np.dstack((l_binary, sxbinary, s_binary)).astype('uint8')
    binary = np.zeros_like(sxbinary)
    binary[((l_binary == 1) & (s_binary == 1) | (sxbinary == 1))] = 1
    binary = 255 * np.dstack((binary, binary, binary)).astype('uint8')
    return binary, channels
def global_gradient(self):
    gradient_values_x = cv2.Sobel(self.img, cv2.CV_64F, 1, 0, ksize=5)
    gradient_values_y = cv2.Sobel(self.img, cv2.CV_64F, 0, 1, ksize=5)
    gradient_magnitude = cv2.addWeighted(gradient_values_x, 0.5, gradient_values_y, 0.5, 0)
    gradient_angle = cv2.phase(gradient_values_x, gradient_values_y, angleInDegrees=True)
    return gradient_magnitude, gradient_angle
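Note that addWeighted in global_gradient returns 0.5*gx + 0.5*gy, a cheap stand-in rather than the true gradient magnitude. If the exact L2 magnitude is wanted, cv2.magnitude computes it directly; a sketch of that variant (written as a free function, since the original is a method):

import cv2

def global_gradient_l2(img):
    gx = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=5)
    gy = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=5)
    magnitude = cv2.magnitude(gx, gy)               # sqrt(gx**2 + gy**2), elementwise
    angle = cv2.phase(gx, gy, angleInDegrees=True)  # angles in [0, 360)
    return magnitude, angle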
def get_frame(self):
    ret, frame = self.cap.read()
    laplacian = cv2.Laplacian(frame, cv2.CV_64F)
    cv2.imwrite('image.jpg', np.hstack((frame, laplacian)))
    return open('image.jpg', 'rb').read()
def get_frame(self):
    ret, frame = self.cap.read()
    laplacian = cv2.Laplacian(frame, cv2.CV_64F)
    cv2.imwrite('imagewritten.jpg', np.hstack((frame, laplacian)))
    return open('imagewritten.jpg', 'rb').read()
def analyse_isomaps(self):
    print('analysing isomaps...')
    for example in self.examples_all:
        img = cv2.imread(example.images[0], cv2.IMREAD_UNCHANGED)
        #blurryness_map = cv2.Laplacian(img, cv2.CV_64F)
        #blurryness_map[np.logical_or(blurryness_map < -700, blurryness_map > 700)] = 0  # try to filter out the edges
        #example.blurryness = blurryness_map.var()
        example.blurryness = _get_gradient_magnitude(img)
        example.coverage = _calc_isomap_coverage(img)
def isomap_playground():
    isomaps = []
    for i in range(len(isomap_paths)):
        isomaps.append(cv2.imread(isomap_paths[i], cv2.IMREAD_UNCHANGED))
    old_isomap_merged = np.zeros([ISOMAP_SIZE, ISOMAP_SIZE, 4], dtype='uint8')
    all_isomaps_merged = merge(isomaps)
    show_isomap('all_isomaps_merged', all_isomaps_merged)
    #cv2.waitKey()
    #cv2.destroyAllWindows()
    #exit()
    for i in range(len(isomaps)):
        new_isomap_merged = merge([old_isomap_merged, isomaps[i]])
        #blurryness = cv2.Laplacian(isomaps[i], cv2.CV_64F).var()
        blurryness_map = cv2.Laplacian(isomaps[i], cv2.CV_64F)
        blurryness_map[np.logical_or(blurryness_map < -700, blurryness_map > 700)] = 0  # try to filter out the edges
        blurryness = blurryness_map.var()
        #show_isomap('laplac', cv2.Laplacian(isomaps[i], cv2.CV_8U))
        #print('max', np.max(cv2.Laplacian(isomaps[i], cv2.CV_64F)), 'min', np.min(cv2.Laplacian(isomaps[i], cv2.CV_64F)))
        coverage = calc_isomap_coverage(isomaps[i])
        print(isomap_paths[i], " isomap coverage:", coverage, "blur detection:", blurryness,
              "overall score", coverage * coverage * blurryness)
        show_isomap('new isomap', isomaps[i])
        show_isomap('merge', new_isomap_merged)
        cv2.waitKey()
        old_isomap_merged = new_isomap_merged
    #cv2.imwrite('/user/HS204/m09113/Desktop/merge_test.png', isomap_merged)
    #cv2.waitKey()
    #cv2.destroyAllWindows()
def parse_array(array):
    # np.frombuffer and int() replace the removed np.fromstring and np.asscalar
    type_code = int(np.frombuffer(array[0:4], dtype=np.int32)[0])
    shape_size = int(np.frombuffer(array[4:8], dtype=np.int32)[0])
    shape = np.frombuffer(array[8:8 + 4 * shape_size], dtype=np.int32)
    if type_code == 5:  # cv2.CV_32F
        dtype = np.float32
    if type_code == 6:  # cv2.CV_64F
        dtype = np.float64
    return np.frombuffer(array[8 + 4 * shape_size:], dtype=dtype).reshape(shape)
def EdgeDetection(img):
    # img = cv2.medianBlur(img, 5)
    img = cv2.fastNlMeansDenoising(img, None, 3, 7, 21)
    _, img = cv2.threshold(img, 30, 255, cv2.THRESH_TOZERO)
    denoise_img = img
    # print(img)
    # cv2.imwrite("Denoise.jpg", img)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()
    # convolve with the appropriate kernels
    laplacian = cv2.Laplacian(img, cv2.CV_64F)
    sobelx = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=5)  # x
    sobely = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=3)  # y
    # sobel2y = cv2.Sobel(sobely, cv2.CV_64F, 0, 1, ksize=3)
    # sobelxy = cv2.Sobel(img, cv2.CV_64F, 1, 1, ksize=5)  # xy
    canny = cv2.Canny(img, 100, 200)
    # OpenCV 3.x findContours returns three values; OpenCV 4.x drops the first.
    contour_image, contours, hierarchy = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # print(canny)
    # cv2.imwrite('laplacian.jpg', laplacian)
    # cv2.imwrite('sobelx.jpg', sobelx)
    # cv2.imwrite('sobely.jpg', sobely)
    # cv2.imwrite('sobelxy.jpg', sobelxy)
    # cv2.imwrite('canny.jpg', canny)
    # plt.subplot(3,2,1), plt.imshow(img, cmap='gray')
    # plt.title('Original'), plt.xticks([]), plt.yticks([])
    # plt.subplot(3,2,2), plt.imshow(laplacian, cmap='gray')
    # plt.title('Laplacian'), plt.xticks([]), plt.yticks([])
    # plt.subplot(3,2,3), plt.imshow(sobelx, cmap='gray')
    # plt.title('Sobel X'), plt.xticks([]), plt.yticks([])
    # plt.subplot(3,2,4), plt.imshow(sobely, cmap='gray')
    # plt.title('Sobel Y'), plt.xticks([]), plt.yticks([])
    # plt.subplot(3,2,4), plt.imshow(sobelxy, cmap='gray')
    # plt.title('Sobel XY'), plt.xticks([]), plt.yticks([])
    # plt.subplot(3,2,5), plt.imshow(canny, cmap='gray')
    # plt.title('Canny'), plt.xticks([]), plt.yticks([])
    # plt.show()
    # return {"denoise": img}
    return {"denoise": denoise_img, "laplacian": laplacian, "canny": canny,
            "sobely": sobely, "sobelx": sobelx, "contour": contour_image}