The following 44 code examples, extracted from open-source Python projects, illustrate how to use skimage.morphology.disk().
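As a quick orientation before the project examples: disk(radius) simply builds a flat, disk-shaped footprint (a (2*radius+1, 2*radius+1) array of 0s and 1s) that is then passed to a morphological operation as its structuring element. The snippet below is a minimal, self-contained sketch of that pattern, not taken from any of the projects; the shapes and counts in the comments follow directly from the definition of disk().

import numpy as np
from skimage.morphology import disk, binary_dilation

# disk(2) is a 5x5 array of 0s and 1s, with 1s at all points within radius 2 of the center
selem = disk(2)
print(selem.shape)   # (5, 5)
print(selem.sum())   # 13 pixels lie inside the disk

# Typical usage: pass the disk as the structuring element of a morphological operation
mask = np.zeros((9, 9), dtype=bool)
mask[4, 4] = True                        # a single "on" pixel
dilated = binary_dilation(mask, selem)   # the pixel grows into a radius-2 disk
print(dilated.sum())                     # 13, same as selem.sum()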
def disk_dilation(self, radius=5, iterations=1):
    """
    This function ...
    :param radius:
    :return:
    """

    structure = morphology.disk(radius, dtype=bool)
    data = ndimage.binary_dilation(self, structure, iterations)

    # Return the dilated mask
    #data, name=None, description=None
    return Mask(data, name=self.name, description=self.description)

# -----------------------------------------------------------------
def trim_edge_cube(cube):
    """ trim_edge_cube: Function that reads in a cube and removes the edges in the cube.
    It runs the erode function to make sure that pixels within 3 pixels away from the edges are blanked.
    This is useful to remove very noisy pixels due to lower coverage by KFPA.
    ----------------------------------------
    Warning: This function modifies the cube.
    """
    #
    mask = np.isfinite(cube)
    if len(cube.shape) == 2:
        mask_2d = mask[:, :]
    else:
        mask_2d = mask[0, :, :]
    # remove image edges
    mask_2d[:, 0] = mask_2d[:, -1] = False
    mask_2d[0, :] = mask_2d[-1, :] = False
    # now erode image (using disk) and convert back to 3D mask
    # then replace all voxels with NaN
    mask &= erosion(mask_2d, disk(5))
    cube[~mask] = np.nan
def __init__(self, settings=None):
    """
    Skin is detected using color ranges. The possible settings are:

    - skin_type: The type of skin most expected in the given images. The
      value can be 'general' or 'none'. If 'none' is given, then an empty
      mask is returned. (default: 'general')
    """
    if settings is None:
        settings = {}
    super(Skin, self).__init__(settings)

    self._k = skm.disk(1, np.bool)

    t = self._settings['skin_type']
    if t == 'general':
        self._lo = np.array([0, 0.19, 0.31], np.float64)
        self._up = np.array([0.1, 1., 1.], np.float64)
    elif t != 'none':
        raise NotImplementedError('Only general type is implemented')
def input_wrapper(f):
    image = misc.imread(f)
    sx, sy = image.shape
    diff = np.abs(sx - sy)

    sx, sy = image.shape
    image = np.pad(image, ((sx//8, sx//8), (sy//8, sy//8)), 'constant')

    if sx > sy:
        image = np.pad(image, ((0, 0), (diff//2, diff//2)), 'constant')
    else:
        image = np.pad(image, ((diff//2, diff//2), (0, 0)), 'constant')

    image = dilation(image, disk(max(sx, sy)/32))
    image = misc.imresize(image, (32, 32))
    if np.max(image) > 1:
        image = image/255.
    return image
def input_wrapper(f):
    image = misc.imread(f)
    # image[image>50]=255
    # image[image<=50]=0
    sx, sy = image.shape
    diff = np.abs(sx - sy)

    sx, sy = image.shape
    image = np.pad(image, ((sx//8, sx//8), (sy//8, sy//8)), 'constant')

    if sx > sy:
        image = np.pad(image, ((0, 0), (diff//2, diff//2)), 'constant')
    else:
        image = np.pad(image, ((diff//2, diff//2), (0, 0)), 'constant')

    image = dilation(image, disk(max(sx, sy)/32))
    image = misc.imresize(image, (32, 32))
    if np.max(image) > 1:
        image = image/255.
    return image
def erosion(x, radius=3):
    """ Return greyscale morphological erosion of an image,
    see `skimage.morphology.erosion <http://scikit-image.org/docs/dev/api/skimage.morphology.html#skimage.morphology.erosion>`_.

    Parameters
    -----------
    x : 2D array image.
    radius : int for the radius of mask.
    """
    from skimage.morphology import disk, dilation, erosion
    mask = disk(radius)
    x = erosion(x, selem=mask)
    return x


## Object Detection
def median_filter(piv, size=2):
    """Computes a median filter on u and v"""
    piv.u = mf(piv.u, footprint=disk(size))
    piv.v = mf(piv.v, footprint=disk(size))
def get_segmented_lungs(im, plot=False):
    # Step 1: Convert into a binary image.
    binary = im < -400
    # Step 2: Remove the blobs connected to the border of the image.
    cleared = clear_border(binary)
    # Step 3: Label the image.
    label_image = label(cleared)
    # Step 4: Keep the labels with the 2 largest areas.
    areas = [r.area for r in regionprops(label_image)]
    areas.sort()
    if len(areas) > 2:
        for region in regionprops(label_image):
            if region.area < areas[-2]:
                for coordinates in region.coords:
                    label_image[coordinates[0], coordinates[1]] = 0
    binary = label_image > 0
    # Step 5: Erosion with a disk of radius 2. This operation separates the lung nodules attached to the blood vessels.
    selem = disk(2)
    binary = binary_erosion(binary, selem)
    # Step 6: Closing with a disk of radius 10. This operation keeps nodules attached to the lung wall.
    selem = disk(10)  # CHANGE BACK TO 10
    binary = binary_closing(binary, selem)
    # Step 7: Fill in the small holes inside the binary mask of lungs.
    edges = roberts(binary)
    binary = ndi.binary_fill_holes(edges)
    # Step 8: Superimpose the binary mask on the input image.
    get_high_vals = binary == 0
    im[get_high_vals] = -2000
    return im, binary
def normalize(x):
    y = np.copy(x)
    minv = -1000
    y[x < minv] = minv
    maxv = 200
    y[x > maxv] = maxv
    return ((y*1.0 - minv)/(maxv - minv)*255).astype(np.uint8)  # 0-255, to save disk space
def outline_polygons(self, width=EDGE_WIDTH, color=LABEL_EDGE):
    from skimage.morphology import binary_dilation, disk
    im = np.asarray(self.image).copy()
    outset = binary_dilation(im == LABEL_POSITIVE, disk(width / 2))
    inset = binary_dilation(im != LABEL_POSITIVE, disk(width - width / 2))
    boundary = outset & inset
    im[boundary] = color
    self.image = Image.fromarray(im)
    self.artist = ImageDraw.Draw(self.image)
def _create_facade_mask(self):
    facade_mask = self.driving_layers.building() > 0.5
    facade_mask = binary_erosion(facade_mask, disk(10))  # Sky is noisy

    # Remove non-wall elements from the facade (we want just the wall)
    facade_mask[self.window_mask()] = 0
    facade_mask[self.facade_layers.door() > 0.5] = 0
    facade_mask[self.balcony_mask()] = 0
    # facade_mask[self.shop_mask()] = 0
    facade_mask[self.pillar_mask()] = 0
    facade_mask[self.facade_layers.molding() > 0.5] = 0
    return facade_mask
def extract_boxes_as_dictionaries(image, threshold=0.5, se=disk(3)):
    mask = image > threshold
    mask = binary_opening(mask, selem=se)
    try:
        props = regionprops(label(mask))

        def _tag(tlbr):
            t, l, b, r = tlbr
            return dict(top=int(t), left=int(l), bottom=int(b), right=int(r))

        result = [_tag(r.bbox) for r in props]
    except (ValueError, TypeError) as e:
        result = []
    return result
def cv2_morph_close(binary_image, size=5):
    import cv2
    from skimage.morphology import disk
    kernel = disk(size)
    result = cv2.morphologyEx(binary_image, cv2.MORPH_CLOSE, kernel)
    return result
def cv2_morph_open(binary_image, size=5):
    import cv2
    from skimage.morphology import disk
    kernel = disk(size)
    result = cv2.morphologyEx(binary_image, cv2.MORPH_OPEN, kernel)
    return result
def get_unclassified_defect_region(classified_defect_region, td_detect, radius):
    # Expand topological defects by radius
    td_region = morphology.binary_dilation((td_detect != 0).astype(np.int), morphology.disk(radius))

    # Remove classified region
    unclassified_defect_region = np.multiply(td_region, 1 - classified_defect_region)
    unclassified_defect_region = morphology.binary_dilation(unclassified_defect_region, morphology.disk(radius))
    unclassified_defect_region = morphology.binary_erosion(unclassified_defect_region, morphology.disk(radius))

    return unclassified_defect_region
def plot_defect_classifications(bmp, list_of_classified_defects, unclassified_defect_region, td_classify, defect_free_region):
    plt.rcParams['figure.figsize'] = (10.0, 10.0)
    plt.set_cmap('gray')
    fig = plt.figure()
    ax = fig.add_subplot(111)
    fig.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=None, hspace=None)

    # Plot the labeled defect regions on top of the temperature field
    bmp[defect_free_region == 1.] = 0.5*bmp[defect_free_region == 1.]  # Defect-free region
    txt_out = []
    for defect in list_of_classified_defects:
        defect_center = centroid(defect['defect_region'])
        outline = defect['defect_region'] ^ morphology.binary_dilation(defect['defect_region'], morphology.disk(2))
        bmp[outline == 1] = 255
        txt = ax.annotate(DEFECT_TYPES[defect['defect_type']], (defect_center[0]-5, defect_center[1]),
                          color='white', fontweight='bold', fontsize=10)
        txt.set_path_effects([PathEffects.withStroke(linewidth=2, foreground='k')])
        txt_out.append(txt)

    unknown_td = np.multiply(unclassified_defect_region, (td_classify != 0).astype(np.int))
    bmp[morphology.binary_dilation(unknown_td, morphology.disk(2)) == 1] = 0
    bmp[morphology.binary_dilation(unknown_td, morphology.disk(1)) == 1] = 255

    frame = ax.imshow(bmp)
    ax.axis('off')
    return fig, ax, frame, txt_out
def remove_appendages(self, super=False):
    """
    This function ...
    :return:
    """

    if super:
        structure = morphology.disk(5, dtype=bool)
    else:
        structure = np.array([[False, True, True, True, False],
                              [True, True, True, True, True],
                              [True, True, True, True, True],
                              [True, True, True, True, True],
                              [False, True, True, True, False]])

    mask = self.opening(structure)

    segments = detect_sources(mask, 0.5, 1).data

    # Get the label of the center segment
    label = segments[int(0.5*segments.shape[0]), int(0.5*segments.shape[1])]

    # Return the new mask with the appendages removed
    #data, name=None, description=None
    return Mask((segments == label), name=self.name, description=self.description)

# -----------------------------------------------------------------
def smooth(self):
    # TODO: there is non nan in the ff img, or?
    mask = self.flatField == 0
    from skimage.filters.rank import median, mean
    from skimage.morphology import disk
    ff = mean(median(self.flatField, disk(5), mask=~mask),
              disk(13), mask=~mask)
    return ff.astype(float) / ff.max(), mask
def input_wrapper_arr(self, image):
    sx, sy = image.shape
    diff = np.abs(sx - sy)

    sx, sy = image.shape
    image = np.pad(image, ((sx//8, sx//8), (sy//8, sy//8)), 'constant')

    if sx > sy:
        image = np.pad(image, ((0, 0), (diff//2, diff//2)), 'constant')
    else:
        image = np.pad(image, ((diff//2, diff//2), (0, 0)), 'constant')

    image = dilation(image, disk(max(sx, sy)/32))
    image = misc.imresize(image, (32, 32))
    if np.max(image) > 1:
        image = image/255.
    return image
def predict_mask(image_id):
    mi = np.load('cache/images/%s_MI.npy' % image_id)
    return binary_dilation(mi[1] < 0, disk(3))[np.newaxis, :, :].astype(np.uint8)
def binary_dilation(x, radius=3):
    """ Return fast binary morphological dilation of an image,
    see `skimage.morphology.binary_dilation <http://scikit-image.org/docs/dev/api/skimage.morphology.html#skimage.morphology.binary_dilation>`_.

    Parameters
    -----------
    x : 2D array image.
    radius : int for the radius of mask.
    """
    from skimage.morphology import disk, binary_dilation
    mask = disk(radius)
    x = binary_dilation(x, selem=mask)
    return x
def dilation(x, radius=3):
    """ Return greyscale morphological dilation of an image,
    see `skimage.morphology.dilation <http://scikit-image.org/docs/dev/api/skimage.morphology.html#skimage.morphology.dilation>`_.

    Parameters
    -----------
    x : 2D array image.
    radius : int for the radius of mask.
    """
    from skimage.morphology import disk, dilation
    mask = disk(radius)
    x = dilation(x, selem=mask)
    return x
def binary_erosion(x, radius=3):
    """ Return binary morphological erosion of an image,
    see `skimage.morphology.binary_erosion <http://scikit-image.org/docs/dev/api/skimage.morphology.html#skimage.morphology.binary_erosion>`_.

    Parameters
    -----------
    x : 2D array image.
    radius : int for the radius of mask.
    """
    from skimage.morphology import disk, dilation, binary_erosion
    mask = disk(radius)
    x = binary_erosion(x, selem=mask)
    return x
def _try_black_tophat(self, roi, cur_text, cur_mrz):
    roi_b = morphology.black_tophat(roi, morphology.disk(5))
    new_text = ocr(roi_b)  # There are some examples where this line basically hangs for an undetermined amount of time.
    new_mrz = MRZ.from_ocr(new_text)
    if new_mrz.valid_score > cur_mrz.valid_score:
        new_mrz.aux['method'] = 'black_tophat'
        cur_text, cur_mrz = new_text, new_mrz

    new_text, new_mrz = self._try_larger_image(roi_b, cur_text, cur_mrz)
    if new_mrz.valid_score > cur_mrz.valid_score:
        new_mrz.aux['method'] = 'black_tophat(rescaled(3))'
        cur_text, cur_mrz = new_text, new_mrz

    return cur_text, cur_mrz
def patches_by_entropy(self, num_patches):
    """
    Finds high-entropy patches based on label, allows net to learn borders more effectively.
    :param num_patches: int, defaults to num_samples; enter a quantity to use in conjunction with randomly sampled patches.
    :return: list of patches (num_patches, 4, h, w) selected by highest entropy
    """
    patches, labels = [], []
    ct = 0
    while ct < num_patches:
        im_path = random.choice(self.train_data)
        fn = os.path.basename(im_path)
        label = io.imread('Labels/' + fn[:-4] + 'L.png')

        # pick again if slice is only background
        if len(np.unique(label)) == 1:
            continue

        img = io.imread(im_path).reshape(5, 240, 240)[:-1].astype('float')
        l_ent = entropy(label, disk(self.h))
        top_ent = np.percentile(l_ent, 90)

        # restart if the 90th entropy percentile is 0
        if top_ent == 0:
            continue

        highest = np.argwhere(l_ent >= top_ent)
        p_s = random.sample(highest, 3)
        for p in p_s:
            p_ix = (p[0] - (self.h / 2), p[0] + ((self.h + 1) / 2),
                    p[1] - (self.w / 2), p[1] + ((self.w + 1) / 2))
            patch = np.array([i[p_ix[0]: p_ix[1], p_ix[2]: p_ix[3]] for i in img])
            # exclude any patches that are too small
            if np.shape(patch) != (4, 65, 65):
                continue
            patches.append(patch)
            labels.append(label[p[0], p[1]])
        ct += 1
    return np.array(patches[:self.num_samples]), np.array(labels[:self.num_samples])
def patches_by_entropy(self, num_patches):
    '''
    Finds high-entropy patches based on label, allows net to learn borders more effectively.
    INPUT: int 'num_patches': defaults to num_samples; enter a quantity to use in conjunction with randomly sampled patches.
    OUTPUT: list of patches (num_patches, 4, h, w) selected by highest entropy
    '''
    h, w = self.patch_size[0], self.patch_size[1]
    patches, labels = [], []
    ct = 0
    while ct < num_patches:
        #im_path = random.choice(training_images)
        im_path = random.choice(self.train_data)
        fn = os.path.basename(im_path)
        label = io.imread('Labels/' + fn[:-4] + 'L.png')

        # pick again if slice is only background
        if len(np.unique(label)) == 0:
            continue

        img = io.imread(im_path).reshape(5, 240, 240)[:-1].astype('float')
        l_ent = entropy(label, disk(self.h))
        top_ent = np.percentile(l_ent, 90)

        # restart if the 90th entropy percentile is 0
        if top_ent == 0:
            continue

        highest = np.argwhere(l_ent >= top_ent)
        p_s = random.sample(highest, 1)
        for p in p_s:
            p_ix = (p[0]-(h/2), p[0]+((h+1)/2), p[1]-(w/2), p[1]+((w+1)/2))
            patch = np.array([i[p_ix[0]:p_ix[1], p_ix[2]:p_ix[3]] for i in img])
            # exclude any patches that are too small
            if np.shape(patch) != (4, 65, 65):
                continue
            patches.append(patch)
            labels.append(label[p[0], p[1]])
        #print '**in patches_by_entropy,patches.shape:', np.array(patches).shape  #(3,4,65,65)
        #print '**in patches_by_entropy,labels.shape:', np.array(labels).shape
        ct += 1
    return np.array(patches[:num_patches]), np.array(labels[:num_patches])
def patches_by_entropy(self, num_patches):
    '''
    Finds high-entropy patches based on label, allows net to learn borders more effectively.
    INPUT: int 'num_patches': defaults to batch_size; enter a quantity to use in conjunction with randomly sampled patches.
    OUTPUT: list of patches (num_patches, 4, h, w) selected by highest entropy
    '''
    h, w = self.patch_size[0], self.patch_size[1]
    patches, labels = [], []
    ct = 0
    while ct < num_patches:
        #im_path = random.choice(training_images)
        im_path = random.choice(self.train_data)
        fn = os.path.basename(im_path)
        label = io.imread('Labels/' + fn[:-4] + 'L.png')

        # pick again if slice is only background
        if len(np.unique(label)) == 0:
            continue

        img = io.imread(im_path).reshape(5, 240, 240)[:-1].astype('float')
        l_ent = entropy(label, disk(self.h))
        top_ent = np.percentile(l_ent, 90)

        # restart if the 90th entropy percentile is 0
        if top_ent == 0:
            continue

        highest = np.argwhere(l_ent >= top_ent)
        p_s = random.sample(highest, 1)
        for p in p_s:
            p_ix = (p[0]-(h/2), p[0]+((h+1)/2), p[1]-(w/2), p[1]+((w+1)/2))
            patch = np.array([i[p_ix[0]:p_ix[1], p_ix[2]:p_ix[3]] for i in img])
            # exclude any patches that are too small
            if np.shape(patch) != (4, 65, 65):
                continue
            patches.append(patch)
            labels.append(label[p[0], p[1]])
        #print '**in patches_by_entropy,patches.shape:', np.array(patches).shape  #(3,4,65,65)
        #print '**in patches_by_entropy,labels.shape:', np.array(labels).shape
        ct += 1
    return np.array(patches[:num_patches]), np.array(labels[:num_patches])
def seperate_lungs(image):
    # Creation of the markers as shown above:
    marker_internal, marker_external, marker_watershed = generate_markers(image)

    # Creation of the Sobel-Gradient
    sobel_filtered_dx = ndimage.sobel(image, 1)
    sobel_filtered_dy = ndimage.sobel(image, 0)
    sobel_gradient = np.hypot(sobel_filtered_dx, sobel_filtered_dy)
    sobel_gradient *= 255.0 / np.max(sobel_gradient)

    # Watershed algorithm
    watershed = morphology.watershed(sobel_gradient, marker_watershed)

    # Reducing the image created by the Watershed algorithm to its outline
    outline = ndimage.morphological_gradient(watershed, size=(3, 3))
    outline = outline.astype(bool)

    # Performing Black-Tophat Morphology for reinclusion
    # Creation of the disk-kernel and increasing its size a bit
    blackhat_struct = [[0, 0, 1, 1, 1, 0, 0],
                       [0, 1, 1, 1, 1, 1, 0],
                       [1, 1, 1, 1, 1, 1, 1],
                       [1, 1, 1, 1, 1, 1, 1],
                       [1, 1, 1, 1, 1, 1, 1],
                       [0, 1, 1, 1, 1, 1, 0],
                       [0, 0, 1, 1, 1, 0, 0]]
    blackhat_struct = ndimage.iterate_structure(blackhat_struct, 8)
    # Perform the Black-Hat
    outline += ndimage.black_tophat(outline, structure=blackhat_struct)

    # Use the internal marker and the Outline that was just created to generate the lungfilter
    lungfilter = np.bitwise_or(marker_internal, outline)
    # Close holes in the lungfilter
    # fill_holes is not used here, since in some slices the heart would be reincluded by accident
    ##structure = np.ones((BINARY_CLOSING_SIZE, BINARY_CLOSING_SIZE))  # 5 is not enough, 7 is
    structure = morphology.disk(BINARY_CLOSING_SIZE)  # better; 5 seems sufficient, we use 7 for safety/just in case
    lungfilter = ndimage.morphology.binary_closing(lungfilter, structure=structure, iterations=3)
    # was structure=np.ones((5,5)); NOTE: with no iterations (i.e. the default 1) we get holes within the lungs for disk(5) and perhaps more

    # Apply the lungfilter (note the filtered areas being assigned -2000 HU)
    segmented = np.where(lungfilter == 1, image, -2000*np.ones((512, 512)))  # was -2000

    return segmented, lungfilter, outline, watershed, sobel_gradient, marker_internal, marker_external, marker_watershed
def seperate_lungs_3d(image):
    # Creation of the markers as shown above:
    marker_internal, marker_external, marker_watershed = generate_markers_3d(image)

    # Creation of the Sobel-Gradient
    sobel_filtered_dx = ndimage.sobel(image, axis=2)
    sobel_filtered_dy = ndimage.sobel(image, axis=1)
    sobel_gradient = np.hypot(sobel_filtered_dx, sobel_filtered_dy)
    sobel_gradient *= 255.0 / np.max(sobel_gradient)

    # Watershed algorithm
    watershed = morphology.watershed(sobel_gradient, marker_watershed)

    # Reducing the image created by the Watershed algorithm to its outline
    outline = ndimage.morphological_gradient(watershed, size=(1, 3, 3))
    outline = outline.astype(bool)

    # Performing Black-Tophat Morphology for reinclusion
    # Creation of the disk-kernel and increasing its size a bit
    blackhat_struct = [[0, 0, 1, 1, 1, 0, 0],
                       [0, 1, 1, 1, 1, 1, 0],
                       [1, 1, 1, 1, 1, 1, 1],
                       [1, 1, 1, 1, 1, 1, 1],
                       [1, 1, 1, 1, 1, 1, 1],
                       [0, 1, 1, 1, 1, 1, 0],
                       [0, 0, 1, 1, 1, 0, 0]]
    blackhat_struct = ndimage.iterate_structure(blackhat_struct, 8)
    blackhat_struct = blackhat_struct[np.newaxis, :, :]
    # Perform the Black-Hat
    outline += ndimage.black_tophat(outline, structure=blackhat_struct)  # very long time

    # Use the internal marker and the Outline that was just created to generate the lungfilter
    lungfilter = np.bitwise_or(marker_internal, outline)
    # Close holes in the lungfilter
    # fill_holes is not used here, since in some slices the heart would be reincluded by accident
    ##structure = np.ones((BINARY_CLOSING_SIZE, BINARY_CLOSING_SIZE))  # 5 is not enough, 7 is
    structure = morphology.disk(BINARY_CLOSING_SIZE)  # better; 5 seems sufficient, we use 7 for safety/just in case
    structure = structure[np.newaxis, :, :]
    lungfilter = ndimage.morphology.binary_closing(lungfilter, structure=structure, iterations=3)
    # was structure=np.ones((5,5)); NOTE: with no iterations (i.e. the default 1) we get holes within the lungs for disk(5) and perhaps more

    # Apply the lungfilter (note the filtered areas being assigned -2000 HU)
    segmented = np.where(lungfilter == 1, image, -2000*np.ones(marker_internal.shape))

    return segmented, lungfilter, outline, watershed, sobel_gradient, marker_internal, marker_external, marker_watershed
def segment_lung_mask(image, speedup=4):

    def largest_label_volume(im, bg=-1):
        vals, counts = np.unique(im, return_counts=True)
        counts = counts[vals != bg]
        vals = vals[vals != bg]
        if len(counts) > 0:
            return vals[np.argmax(counts)]
        else:
            return None

    if speedup > 1:
        smallImage = transform.downscale_local_mean(image, (1, speedup, speedup))
    else:
        smallImage = image

    # not actually binary, but 1 and 2.
    # 0 is treated as background, which we do not want
    binary_image = np.array((smallImage < -320) & (smallImage > -1400), dtype=np.int8)
    #return binary_image

    for i, axial_slice in enumerate(binary_image):
        axial_slice = 1 - axial_slice
        labeling = measure.label(axial_slice)
        l_max = largest_label_volume(labeling, bg=0)
        if l_max is not None:  # This slice contains some lung
            binary_image[i][labeling != l_max] = 1

    # Remove other air pockets inside the body
    labels = measure.label(binary_image, background=0)
    m = labels.shape[0]//2
    check_layers = labels[m-12:m+20:4, :, :]
    l_max = largest_label_volume(check_layers, bg=0)

    while l_max is not None:  # There are air pockets
        idx = np.where(check_layers == l_max)
        ii = np.vstack(idx[1:]).flatten()
        if np.max(ii) > labels.shape[1]-24/speedup or np.min(ii) < 24/speedup:
            binary_image[labels == l_max] = 0
            labels = measure.label(binary_image, background=0)
            m = labels.shape[0]//2
            check_layers = labels[m-12:m+20:4, :, :]
            l_max = largest_label_volume(check_layers, bg=0)
        else:
            binary_image[labels != l_max] = 0
            break

    if speedup <= 1:
        return binary_image
    else:
        res = np.zeros(image.shape, dtype=np.uint8)
        for i, x in enumerate(binary_image):
            orig = np.copy(x)
            x = binary_dilation(x, disk(5))
            x = binary_erosion(x, disk(5))
            x = np.logical_or(x, orig)
            y = transform.resize(x*1.0, image.shape[1:3])
            res[i][y > 0.5] = 1
        return res
def unet_candidates():
    cands = glob.glob("../data/predictions_epoch9_23_all/*.png")
    #df = pd.DataFrame(columns=['seriesuid','coordX','coordY','coordZ','class'])
    data = []
    imname = ""
    origin = []
    spacing = []
    nrimages = 0
    for name in tqdm(cands):
        #image = imread(name)
        image_t = imread(name)
        image_t = image_t.transpose()
        # Thresholding
        image_t[image_t < THRESHOLD] = 0
        image_t[image_t > 0] = 1
        # Erosion
        selem = morphology.disk(1)
        image_eroded = image_t
        image_eroded = morphology.binary_erosion(image_t, selem=selem)

        label_im, nb_labels = ndimage.label(image_eroded)
        imname3 = os.path.split(name)[1].replace('.png', '')

        splitted = imname3.split("slice")
        slice = splitted[1]
        imname2 = splitted[0][:-1]
        centers = []
        for i in xrange(1, nb_labels + 1):
            blob_i = np.where(label_im == i, 1, 0)
            mass = center_of_mass(blob_i)
            centers.append([mass[1], mass[0]])

        if imname2 != imname:
            if os.path.isfile("../data/1_1_1mm_512_x_512_annotation_masks/spacings/{0}.pickle".format(imname2)):
                with open("../data/1_1_1mm_512_x_512_annotation_masks/spacings/{0}.pickle".format(imname2), 'rb') as handle:
                    dic = pickle.load(handle)
                    origin = dic["origin"]
                    spacing = dic["spacing"]
            imname = imname2
            nrimages += 1

        for center in centers:
            coords = voxel_2_world([int(slice), center[1]+(512-324)*0.5, center[0]+(512-324)*0.5], origin, spacing)
            data.append([imname2, coords[2], coords[1], coords[0], '?'])

        #if nrimages == 5:
        #    break

    df = pd.DataFrame(data, columns=CANDIDATES_COLUMNS)
    save_candidates("../data/candidates_unet_final_23.csv", df)
def estimate_background(self, method, sigma_clip=True, sigma_level=3.0):
    """
    This function ...
    :param method:
    :param sigma_clip:
    :param sigma_level:
    :return:
    """

    # Make a distinction between the "PTS" way of estimating the background and all other methods.
    # For the PTS way, in case sigma clipping is disabled, this means it is disabled only for the 'polynomial fitting step'
    # of the background estimation, so provide two distinct masks to the interpolated() method: the clipped mask for
    # the 'background noise' estimation and the non-clipped mask for the 'polynomial fitting step'.
    if method == "pts":

        if sigma_clip:

            try:
                mask = statistics.sigma_clip_mask(self.cutout, sigma_level=sigma_level, mask=self.mask)
            except TypeError:

                #plotting.plot_box(self.cutout)
                #plotting.plot_mask(self.mask)
                #print("xsize", self.cutout.xsize, self.cutout.ysize)

                radius = int(round(0.25 * self.cutout.xsize))
                #print("radius", 0.25*self.cutout.xsize, radius)

                disk = morphology.disk(radius, dtype=bool)

                mask = Mask.empty_like(self.cutout)

                x_min = int(round(0.5 * (self.cutout.xsize - disk.shape[1])))
                y_min = int(round(0.5 * (self.cutout.ysize - disk.shape[0])))

                #plotting.plot_mask(mask)
                mask[y_min:y_min+disk.shape[0], x_min:x_min+disk.shape[1]] = disk
                #plotting.plot_mask(mask)

            no_clip_mask = None

        else:

            mask = statistics.sigma_clip_mask(self.cutout, sigma_level=sigma_level, mask=self.mask)
            no_clip_mask = self.mask

    else:

        # Perform sigma-clipping on the background if requested
        if sigma_clip: mask = statistics.sigma_clip_mask(self.cutout, sigma_level=sigma_level, mask=self.mask)
        else: mask = self.mask

        no_clip_mask = None

    if self.contamination is not None:
        mask = mask + self.contamination
        if no_clip_mask is not None: no_clip_mask = no_clip_mask + self.contamination

    # Perform the interpolation
    self.background = self.cutout.interpolated(mask, method, no_clip_mask=no_clip_mask, plot=self.special)

    if self.special: self.plot(title="background estimated")

# -----------------------------------------------------------------
def get_masks(im):
    '''
    Step 1: Convert into a binary image.
    '''
    print('step1')
    binary = im < 604
    # plt.imshow(binary, cmap=plt.cm.gray)
    # plt.show()

    '''
    Step 2: Remove the blobs connected to the border of the image.
    '''
    print('step2')
    cleared = clear_border(binary)
    # plt.imshow(cleared, cmap=plt.cm.gray)
    # plt.show()

    '''
    Step 3: Label the image.
    '''
    print('step3')
    label_image = label(cleared)
    # plt.imshow(label_image, cmap=plt.cm.gray)
    # plt.show()

    '''
    Step 4: Keep the labels with the 2 largest areas.
    '''
    print('step4')
    areas = [r.area for r in regionprops(label_image)]
    areas.sort()
    if len(areas) > 2:
        for region in regionprops(label_image):
            if region.area < 10 and region.area > 3:
                print(region.centroid, region.area)
                # print(region.area)
                centroid = region.centroid
                plot_im(im, centroid)
                # label_image[int(centroid[0]), int(centroid[1])] = 1000
            # for coordinates in region.coords:
            #     label_image[coordinates[0], coordinates[1]] = 0
    # binary = label_image > 999
    # plt.imshow(binary, cmap=plt.cm.gray)
    # plt.show()

    '''
    Step 5: Erosion operation with a disk of radius 2. This operation separates
    the lung nodules attached to the blood vessels.
    '''
    # print('step5')
    # selem = disk(2)
    # binary = binary_erosion(binary, selem)
    # plt.imshow(binary, cmap=plt.cm.gray)
    # plt.show()
def img_tesseract_detect(c_rect, im):
    # cv2.minAreaRect returns box points with angles in the -90~0 range, so re-order the four
    # corners into a consistent (top-left, bottom-left, top-right, bottom-right) order before warping
    pts = c_rect.reshape(4, 2)
    rect = np.zeros((4, 2), dtype="float32")

    # the top-left point has the smallest sum whereas the
    # bottom-right has the largest sum
    s = pts.sum(axis=1)
    rect[0] = pts[np.argmin(s)]
    rect[3] = pts[np.argmax(s)]

    # compute the difference between the points -- the top-right
    # will have the minimum difference and the bottom-left will
    # have the maximum difference
    diff = np.diff(pts, axis=1)
    rect[2] = pts[np.argmin(diff)]
    rect[1] = pts[np.argmax(diff)]

    dst = np.float32([[0, 0], [0, 100], [200, 0], [200, 100]])

    M = cv2.getPerspectiveTransform(rect, dst)
    warp = cv2.warpPerspective(im, M, (200, 100))

    img_show_hook("warped region", warp)

    warp = np.array(warp, dtype=np.uint8)
    radius = 10
    selem = disk(radius)

    # local OTSU thresholding
    local_otsu = rank.otsu(warp, selem)
    l_otsu = np.uint8(warp >= local_otsu)
    l_otsu *= 255

    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (4, 4))
    l_otsu = cv2.morphologyEx(l_otsu, cv2.MORPH_CLOSE, kernel)

    img_show_hook("local OTSU result", l_otsu)

    print("OCR result:")
    print(pytesseract.image_to_string(Image.fromarray(l_otsu)))

    cv2.waitKey(0)
    return
def img_tesseract_detect(c_rect, im):
    # cv2.minAreaRect returns box points with angles in the -90~0 range, so re-order the four
    # corners into a consistent (top-left, bottom-left, top-right, bottom-right) order before warping
    pts = c_rect.reshape(4, 2)
    rect = np.zeros((4, 2), dtype="float32")

    # the top-left point has the smallest sum whereas the
    # bottom-right has the largest sum
    s = pts.sum(axis=1)
    rect[0] = pts[np.argmin(s)]
    rect[3] = pts[np.argmax(s)]

    # compute the difference between the points -- the top-right
    # will have the minimum difference and the bottom-left will
    # have the maximum difference
    diff = np.diff(pts, axis=1)
    rect[2] = pts[np.argmin(diff)]
    rect[1] = pts[np.argmax(diff)]

    width = rect[3][0] - rect[0][0]
    height = rect[3][1] - rect[0][1]
    width = int((50.0 / height) * width)
    height = 50
    dst = np.float32([[0, 0], [0, height], [width, 0], [width, height]])

    M = cv2.getPerspectiveTransform(rect, dst)
    warp = cv2.warpPerspective(im, M, (width, height))

    img_show_hook("warped region", warp)

    warp = np.array(warp, dtype=np.uint8)
    radius = 13
    selem = disk(radius)

    # local OTSU thresholding
    local_otsu = rank.otsu(warp, selem)
    l_otsu = np.uint8(warp >= local_otsu)
    l_otsu *= 255

    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (2, 2))
    l_otsu = cv2.morphologyEx(l_otsu, cv2.MORPH_CLOSE, kernel)

    img_show_hook("local OTSU result", l_otsu)

    print("OCR result:")
    print(pytesseract.image_to_string(Image.fromarray(l_otsu), lang="chi_sim"))

    cv2.waitKey(0)
    return
def unet_candidates():
    cands = glob.glob('%s/*.png' % sys.argv[1])  # "/razberry/workspace/dsb_nodule_detection.109fd54/*.png
    #df = pd.DataFrame(columns=['seriesuid','coordX','coordY','coordZ','class'])
    data = []
    imname = ""
    origin = []
    spacing = []
    nrimages = 0
    for name in tqdm(cands):
        #image = imread(name)
        image_t = imread(name)
        image_t = image_t.transpose()
        # Thresholding
        image_t[image_t < THRESHOLD] = 0
        image_t[image_t > 0] = 1
        # Erosion
        selem = morphology.disk(1)
        image_eroded = image_t
        image_eroded = morphology.binary_erosion(image_t, selem=selem)

        label_im, nb_labels = ndimage.label(image_eroded)
        imname3 = os.path.split(name)[1].replace('.png', '')

        splitted = imname3.split("_")
        slice = splitted[1]
        imname2 = splitted[0][:-1]
        centers = []
        for i in xrange(1, nb_labels + 1):
            blob_i = np.where(label_im == i, 1, 0)
            mass = center_of_mass(blob_i)
            centers.append([mass[1], mass[0]])

        if imname2 != imname:
            if os.path.isfile("../data/1_1_1mm_512_x_512_annotation_masks/spacings/{0}.pickle".format(imname2)):
                with open("../data/1_1_1mm_512_x_512_annotation_masks/spacings/{0}.pickle".format(imname2), 'rb') as handle:
                    dic = pickle.load(handle)
                    origin = dic["origin"]
                    spacing = dic["spacing"]
            imname = imname2
            nrimages += 1

        for center in centers:
            # coords = voxel_2_world([int(slice), center[1]+(512-324)*0.5, center[0]+(512-324)*0.5], origin, spacing)
            coords = [int(slice), center[1], center[0]]
            data.append([imname2, coords[2], coords[1], coords[0], '?'])

        #if nrimages == 5:
        #    break

    df = pd.DataFrame(data, columns=CANDIDATES_COLUMNS)
    save_candidates('%s/candidates.csv' % work_dir, df)
def get_segmented_lungs(image):
    # Creation of the markers as shown above:
    marker_internal, marker_external, marker_watershed = generate_markers(image)

    # Creation of the Sobel-Gradient
    sobel_filtered_dx = ndimage.sobel(image, 1)
    sobel_filtered_dy = ndimage.sobel(image, 0)
    sobel_gradient = np.hypot(sobel_filtered_dx, sobel_filtered_dy)
    sobel_gradient *= 255.0 / np.max(sobel_gradient)

    # Watershed algorithm
    watershed = morphology.watershed(sobel_gradient, marker_watershed)

    # Reducing the image created by the Watershed algorithm to its outline
    outline = ndimage.morphological_gradient(watershed, size=(3, 3))
    outline = outline.astype(bool)

    # Performing Black-Tophat Morphology for reinclusion
    # Creation of the disk-kernel and increasing its size a bit
    blackhat_struct = [[0, 0, 1, 1, 1, 0, 0],
                       [0, 1, 1, 1, 1, 1, 0],
                       [1, 1, 1, 1, 1, 1, 1],
                       [1, 1, 1, 1, 1, 1, 1],
                       [1, 1, 1, 1, 1, 1, 1],
                       [0, 1, 1, 1, 1, 1, 0],
                       [0, 0, 1, 1, 1, 0, 0]]
    #blackhat_struct = ndimage.iterate_structure(blackhat_struct, 8)
    blackhat_struct = ndimage.iterate_structure(blackhat_struct, 14)  # <- retains more of the area; 12 works well, but still excluded some parts, so changed to 14
    # Perform the Black-Hat
    outline += ndimage.black_tophat(outline, structure=blackhat_struct)

    # Use the internal marker and the Outline that was just created to generate the lungfilter
    lungfilter = np.bitwise_or(marker_internal, outline)
    # Close holes in the lungfilter
    # fill_holes is not used here, since in some slices the heart would be reincluded by accident
    lungfilter = ndimage.morphology.binary_closing(lungfilter, structure=np.ones((5, 5)), iterations=3)

    # Apply the lungfilter (note the filtered areas being assigned threshold_min HU)
    segmented = np.where(lungfilter == 1, image, threshold_min*np.ones(image.shape))

    #return segmented, lungfilter, outline, watershed, sobel_gradient, marker_internal, marker_external, marker_watershed
    return segmented