Python scipy.ndimage module: find_objects() example source code

We extracted the following 13 code examples from open-source Python projects to illustrate how to use scipy.ndimage.find_objects().
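
For orientation, here is a minimal, self-contained sketch (not taken from any of the projects below) of what scipy.ndimage.find_objects() returns: one tuple of slices, i.e. one bounding box, per label present in a labelled array.

import numpy as np
from scipy import ndimage

a = np.zeros((6, 6), dtype=int)
a[1:3, 1:3] = 1                      # object labelled 1
a[4:6, 3:6] = 2                      # object labelled 2

bboxes = ndimage.find_objects(a)
# bboxes == [(slice(1, 3, None), slice(1, 3, None)),
#            (slice(4, 6, None), slice(3, 6, None))]
crop = a[bboxes[0]]                  # crop object 1 with its bounding-box slices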

Project: kaggle-right-whale    Author: felixlaumon    | Project source | File source
def get_transformed_bbox(bbox, image_width, image_height, **kwargs):
    l, t, w, h = bbox
    r = l + w
    b = t + h
    y_heatmap = np.zeros((image_height, image_width)).astype(bool)
    y_heatmap[t:b, l:r] = True

    y_heatmap = im_affine_transform(y_heatmap[np.newaxis, ...], **kwargs)
    y_heatmap = y_heatmap[0].astype(bool)

    dets = find_objects(y_heatmap)

    if len(dets) == 1:
        t = dets[0][0].start
        b = dets[0][0].stop
        l = dets[0][1].start
        r = dets[0][1].stop
        w = r - l
        h = b - t
    else:
        l, t, w, h = 0, 0, 0, 0

    return l, t, w, h
Project: Lifting-from-the-Deep-release    Author: DenisTome    | Project source | File source
def detect_objects_heatmap(heatmap):
    data = 256 * heatmap
    data_max = filters.maximum_filter(data, 3)
    maxima = (data == data_max)
    data_min = filters.minimum_filter(data, 3)
    diff = ((data_max - data_min) > 0.3)
    maxima[diff == 0] = 0
    labeled, num_objects = ndimage.label(maxima)
    slices = ndimage.find_objects(labeled)
    objects = np.zeros((num_objects, 2), dtype=np.int32)
    pidx = 0
    for (dy, dx) in slices:
        pos = [(dy.start + dy.stop - 1) // 2, (dx.start + dx.stop - 1) // 2]
        if heatmap[pos[0], pos[1]] > config.CENTER_TR:
            objects[pidx, :] = pos
            pidx += 1
    return objects[:pidx]
Project: pyku    Author: dubvulture    | Project source | File source
def extract_digits(self, image):
        """
        Extract digits from a binary image representing a sudoku
        :param image: binary image/sudoku
        :return: array of digits and their probabilities
        """
        prob = np.zeros(4, dtype=np.float32)
        digits = np.zeros((4, 9, 9), dtype=object)
        for i in range(4):
            labeled, features = label(image, structure=CROSS)
            objs = find_objects(labeled)
            for obj in objs:
                roi = image[obj]
                # center of bounding box
                cy = (obj[0].stop + obj[0].start) / 2
                cx = (obj[1].stop + obj[1].start) / 2
                dists = cdist([[cy, cx]], CENTROIDS, 'euclidean')
                pos = np.argmin(dists)
                cy, cx = pos % 9, pos // 9  # integer division so the result can be used as an index
                # 28x28 image, center relative to sudoku
                prediction = self.classifier.classify(morph(roi))
                if isinstance(digits[i, cy, cx], int):  # cell still holds the initial 0 sentinel
                    # Newly found digit
                    digits[i, cy, cx] = prediction
                    prob[i] += prediction[0, 0]
                elif prediction[0, 0] > digits[i, cy, cx][0, 0]:
                    # Overlapping! (noise), choose the most probable prediction
                    prob[i] -= digits[i, cy, cx][0, 0]
                    digits[i, cy, cx] = prediction
                    prob[i] += prediction[0, 0]
            image = np.rot90(image)
        logging.info(prob)
        return digits[np.argmax(prob)]
Project: tissue_analysis    Author: VirtualPlants    | Project source | File source
def real_indices(slices, resolutions):
    """
    Transform the discrete (voxels based) coordinates of the boundingbox (`slices`) into their real-world size using `resolutions`.

    Args:
       slices: (list) - list of slices or boundingboxes found using scipy.ndimage.find_objects;
       resolutions: (list) - length-2 (2D) or length-3 (3D) vector of float indicating the size of a voxel in real-world units;
    """
    return [ (s.start*r, s.stop*r) for s,r in zip(slices,resolutions) ]
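
As a quick illustration (an assumed call, not part of the project; the voxel sizes below are made up, and real_indices is the function defined above), a bounding box returned by scipy.ndimage.find_objects can be converted to real-world extents like this:

import numpy as np
from scipy import ndimage

labels = np.zeros((10, 10), dtype=int)
labels[2:5, 3:8] = 1
bbox = ndimage.find_objects(labels)[0]   # (slice(2, 5, None), slice(3, 8, None))
real_indices(bbox, [0.5, 0.25])          # -> [(1.0, 2.5), (0.75, 2.0)]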
Project: tissue_analysis    Author: VirtualPlants    | Project source | File source
def find_smallest_boundingbox(image, label_1, label_2):
    """Return the smallest boundingbox within `image` between cell-labels `label_1` & `label_2`."""
    boundingbox = nd.find_objects(image, max_label=max([label_1, label_2]))
    boundingbox = {label_1:boundingbox[label_1-1], label_2:boundingbox[label_2-1]} # we do 'label_x - 1' since 'nd.find_objects' start at '1' (and not '0') !
    label_1, label_2 = sort_boundingbox(boundingbox, label_1, label_2)
    return bbox[label_1]
Project: decode    Author: deshima-dev    | Project source | File source
def slicewhere(condition):
    """Return slices of regions that fulfill condition.

    Example:
        >>> cond = [False, True, True, False, False, True, False]
        >>> fm.utils.slicewhere(cond)
        [slice(1, 3, None), slice(5, 6, None)]

    Args:
        condition (numpy.ndarray): Array of booleans.

    Returns:
        slices (list of slice): List of slice objects.
    """
    return [region[0] for region in ndimage.find_objects(ndimage.label(condition)[0])]
Project: indus-script-ocr    Author: tpsatish95    | Project source | File source
def get_candidate_symbol_regions(image, text_regions, updated_width, updated_height):
    img = skimage.io.imread(image.name)[:, :, :3]
    if not (updated_height == len(img) and updated_width == len(img[0])):
        img = skimage.transform.resize(img, (updated_height, updated_width))

    symbol_regions = dict()
    for x, y, w, h in text_regions:
        text_region_image = img[y: y + h, x: x + w]
        text_region_image_width = len(text_region_image[0])
        text_region_image_height = len(text_region_image)

        text_region_gray_image = skimage.color.rgb2gray(text_region_image)
        text_region_binary_image = text_region_gray_image <= threshold_otsu(text_region_gray_image)

        temp = TemporaryFile(".png")
        skimage.io.imsave(temp.name, text_region_binary_image)
        text_region_binary_image = skimage.io.imread(temp.name)

        text_region_blurred_image = gaussian_filter(text_region_binary_image, sigma=3.5)
        text_region_blobs = text_region_blurred_image > text_region_blurred_image.mean()

        text_region_labels = skimage.morphology.label(text_region_blobs, neighbors=4)

        symbol_blobs = ndimage.find_objects(text_region_labels)
        candidate_symbol_regions = set()

        for c1, c2 in symbol_blobs:
            if (c2.stop - c2.start) * (c1.stop - c1.start) > (text_region_image.shape[0] * text_region_image.shape[1]) * 0.026:
                if (c2.stop - c2.start) * (c1.stop - c1.start) < (text_region_image.shape[0] * text_region_image.shape[1]) * 0.90:
                    candidate_symbol_regions.add(
                        (c2.start, c1.start, c2.stop - c2.start, c1.stop - c1.start))

        symbol_regions[str((x, y, w, h))] = dict()
        symbol_regions[str((x, y, w, h))]["image"] = text_region_image
        symbol_regions[str((x, y, w, h))]["regions"] = candidate_symbol_regions
        symbol_regions[str((x, y, w, h))]["width"] = text_region_image_width
        symbol_regions[str((x, y, w, h))]["height"] = text_region_image_height

    return symbol_regions
Project: stomatameasurer    Author: TeamMacLean    | Project source | File source
def get_stomata_info(stomata):
    l = ndimage.find_objects(stomata)
Project: tissue_analysis    Author: VirtualPlants    | Project source | File source
def boundingbox(self, labels = None, real = False):
        """
        Return the bounding box of a label.

        :Examples:

        >>> import numpy as np
        >>> a = np.array([[1, 2, 7, 7, 1, 1],
                          [1, 6, 5, 7, 3, 3],
                          [2, 2, 1, 7, 3, 3],
                          [1, 1, 1, 4, 1, 1]])

        >>> from vplants.tissue_analysis.spatial_image_analysis import SpatialImageAnalysis
        >>> analysis = SpatialImageAnalysis(a)

        >>> analysis.boundingbox(7)
        (slice(0, 3), slice(2, 4), slice(0, 1))

        >>> analysis.boundingbox([7,2])
        [(slice(0, 3), slice(2, 4), slice(0, 1)), (slice(0, 3), slice(0, 2), slice(0, 1))]

        >>> analysis.boundingbox()
        [(slice(0, 4), slice(0, 6), slice(0, 1)),
        (slice(0, 3), slice(0, 2), slice(0, 1)),
        (slice(1, 3), slice(4, 6), slice(0, 1)),
        (slice(3, 4), slice(3, 4), slice(0, 1)),
        (slice(1, 2), slice(2, 3), slice(0, 1)),
        (slice(1, 2), slice(1, 2), slice(0, 1)),
        (slice(0, 3), slice(2, 4), slice(0, 1))]
        """
        if labels == 0:
            return nd.find_objects(self.image==0)[0]

        if self._bbox is None:
            self._bbox = nd.find_objects(self.image)

        if labels is None:
            labels = copy.copy(self.labels())
            if self.background() is not None:
                labels.append(self.background())

        # bounding boxes of objects labelled 1 to n are stored in self._bbox; to access the i-th element, use index i-1
        if isinstance (labels, list):
            bboxes = [self._bbox[i-1] for i in labels]
            if real : return self.convert_return([real_indices(bbox,self._voxelsize) for bbox in bboxes],labels)
            else : return self.convert_return(bboxes,labels)

        else :
            try:
                if real:  return real_indices(self._bbox[labels-1], self._voxelsize)
                else : return self._bbox[labels-1]
            except:
                return None
Project: imagepy    Author: Image-Py    | Project source | File source
def run(self, ips, imgs, para = None):
        inten = WindowsManager.get(para['inten']).ips
        if not para['slice']:
            imgs = [inten.img]
            msks = [ips.img]
        else: 
            msks = ips.imgs
            if len(msks)==1:
                msks *= len(imgs)
        buf = imgs[0].astype(np.uint16)
        strc = ndimage.generate_binary_structure(2, 1 if para['con']=='4-connect' else 2)
        idct = ['Max','Min','Mean','Variance','Standard','Sum']
        key = {'Max':'max','Min':'min','Mean':'mean',
               'Variance':'var','Standard':'std','Sum':'sum'}
        idct = [i for i in idct if para[key[i]]]
        titles = ['Slice', 'ID'][0 if para['slice'] else 1:] 
        if para['center']: titles.extend(['Center-X','Center-Y'])
        if para['extent']: titles.extend(['Min-Y','Min-X','Max-Y','Max-X'])
        titles.extend(idct)
        k = ips.unit[0]
        data, mark = [], []
        for i in range(len(imgs)):
            n = ndimage.label(msks[i], strc, output=buf)
            index = range(1, n+1)
            dt = []
            if para['slice']:dt.append([i]*n)
            dt.append(range(n))

            xy = ndimage.center_of_mass(imgs[i], buf, index)
            xy = np.array(xy).round(2).T
            if para['center']:dt.extend([xy[1]*k, xy[0]*k])

            boxs = [None] * n
            if para['extent']:
                boxs = ndimage.find_objects(buf)
                boxs = [(i[0].start, i[1].start, i[0].stop, i[1].stop) for i in boxs]
                for j in (0,1,2,3):
                    dt.append([i[j]*k for i in boxs])
            if para['max']:dt.append(ndimage.maximum(imgs[i], buf, index).round(2))
            if para['min']:dt.append(ndimage.minimum(imgs[i], buf, index).round(2))        
            if para['mean']:dt.append(ndimage.mean(imgs[i], buf, index).round(2))
            if para['var']:dt.append(ndimage.variance(imgs[i], buf, index).round(2)) 
            if para['std']:dt.append(ndimage.standard_deviation(imgs[i], buf, index).round(2))
            if para['sum']:dt.append(ndimage.sum(imgs[i], buf, index).round(2))      

            mark.append([(center, cov) for center,cov in zip(xy.T, boxs)]) 
            data.extend(list(zip(*dt)))

        IPy.table(inten.title+'-region statistic', data, titles)
        inten.mark = Mark(mark)
        inten.update = True
Project: CartoonPy    Author: bxtkezhan    | Project source | File source
def findObjects(clip,rem_thr=500, preview=False):
    """ 
    Returns a list of ImageClips, each representing a separate object on
    the screen.

    rem_thr : all objects found with size < rem_thr will be
         considered false positives and will be removed

    """

    image = clip.get_frame(0)
    if clip.mask is None:
        clip = clip.add_mask()

    mask = clip.mask.get_frame(0)
    labelled, num_features = ndi.measurements.label(image[:,:,0])

    #find the objects
    slices = ndi.find_objects(labelled)
    # cool trick to remove letter holes (in o,e,a, etc.)
    slices = [e for e in slices if  mask[e[0],e[1]].mean() >0.2]
    # remove very small slices
    slices = [e for e in slices if  image[e[0],e[1]].size > rem_thr]
    # Sort the slices from left to right
    islices = sorted(enumerate(slices), key = lambda s : s[1][1].start)

    letters = []
    for i,(ind,(sy,sx)) in enumerate(islices):
        """ crop each letter separately """
        sy = slice(sy.start-1,sy.stop+1)
        sx = slice(sx.start-1,sx.stop+1)
        letter = image[sy,sx]
        labletter = labelled[sy,sx]
        maskletter = (labletter==(ind+1))*mask[sy,sx]
        letter = ImageClip(image[sy,sx])
        letter.mask = ImageClip( maskletter,ismask=True)
        letter.screenpos = np.array((sx.start,sy.start))
        letters.append(letter)

    if preview:
        import matplotlib.pyplot as plt
        print( "found %d objects"%(num_features) )
        fig,ax = plt.subplots(2)
        ax[0].axis('off')
        ax[0].imshow(labelled)
        ax[1].imshow([range(num_features)],interpolation='nearest')
        ax[1].set_yticks([])
        plt.show()

    return letters
Project: decoding_challenge_cortana_2016_3rd    Author: kingjr    | Project source | File source
def _find_clusters_1dir(x, x_in, connectivity, max_step, t_power, ndimage):
    """Actually call the clustering algorithm"""
    if connectivity is None:
        labels, n_labels = ndimage.label(x_in)

        if x.ndim == 1:
            # slices
            clusters = ndimage.find_objects(labels, n_labels)
            if len(clusters) == 0:
                sums = list()
            else:
                index = list(range(1, n_labels + 1))
                if t_power == 1:
                    sums = ndimage.measurements.sum(x, labels, index=index)
                else:
                    sums = ndimage.measurements.sum(np.sign(x) *
                                                    np.abs(x) ** t_power,
                                                    labels, index=index)
        else:
            # boolean masks (raveled)
            clusters = list()
            sums = np.empty(n_labels)
            for l in range(1, n_labels + 1):
                c = labels == l
                clusters.append(c.ravel())
                if t_power == 1:
                    sums[l - 1] = np.sum(x[c])
                else:
                    sums[l - 1] = np.sum(np.sign(x[c]) *
                                         np.abs(x[c]) ** t_power)
    else:
        if x.ndim > 1:
            raise Exception("Data should be 1D when using a connectivity "
                            "to define clusters.")
        if isinstance(connectivity, sparse.spmatrix):
            clusters = _get_components(x_in, connectivity)
        elif isinstance(connectivity, list):  # use temporal adjacency
            clusters = _get_clusters_st(x_in, connectivity, max_step)
        else:
            raise ValueError('Connectivity must be a sparse matrix or list')
        if t_power == 1:
            sums = np.array([np.sum(x[c]) for c in clusters])
        else:
            sums = np.array([np.sum(np.sign(x[c]) * np.abs(x[c]) ** t_power)
                            for c in clusters])

    return clusters, np.atleast_1d(sums)
Project: stomatameasurer    Author: TeamMacLean    | Project source | File source
def get_stomata(max_proj_image, min_obj_size=200, max_obj_size=1000):
    """Performs image segmentation from a max_proj_image.
     Disposes of objects in range min_obj_size to
    max_obj_size

    :param max_proj_image: the maximum projection image
    :type max_proj_image: numpy.ndarray, uint16
    :param min_obj_size: minimum size of object to keep
    :type min_obj_size: int
    :param max_obj_size: maximum size of object to keep
    :type max_obj_size: int
    :returns: list of [ [coordinates of kept objects - list of slice objects],
                        binary object image - numpy.ndarray,
                        labelled object image - numpy.ndarray
                     ]

    """

    # pore_margin = 10
    # max_obj_size = 1000
    # min_obj_size = 200
    # for prop, value in segment_options:
    #     if prop == 'pore_margin':
    #         pore_margin = value
    #     if prop == 'max_obj_size':
    #         max_obj_size = value
    #     if prop == 'min_obj_size':
    #         min_obj_size = value
    #
    # print(pore_margin)
    # print(max_obj_size)
    # print(min_obj_size)

    #rescale_min = 50
    #rescale_max= 100
    #rescaled = exposure.rescale_intensity(max_proj_image, in_range=(rescale_min,rescale_max))
    rescaled = max_proj_image
    seed = np.copy(rescaled)
    seed[1:-1, 1:-1] = rescaled.max()
    #mask = rescaled
    #if gamma != None:
    #    rescaled = exposure.adjust_gamma(max_proj_image, gamma)
    #filled = reconstruction(seed, mask, method='erosion')
    closed = dilation(rescaled)
    seed = np.copy(closed)
    seed[1:-1, 1:-1] = closed.max()
    mask = closed


    filled = reconstruction(seed, mask, method='erosion')
    label_objects, nb_labels = ndimage.label(filled)
    sizes = np.bincount(label_objects.ravel())
    mask_sizes = sizes
    mask_sizes = (sizes > min_obj_size) & (sizes < max_obj_size)
    #mask_sizes = (sizes > 200) & (sizes < 1000)
    mask_sizes[0] = 0
    big_objs = mask_sizes[label_objects]
    stomata, _ = ndimage.label(big_objs)
    obj_slices = ndimage.find_objects(stomata)
    return [obj_slices, big_objs, stomata]
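
For reference, a minimal usage sketch (the max_proj variable and the size thresholds below are placeholders, not part of the project): the slices in the first returned element can be used to crop each detected object out of the labelled image.

obj_slices, big_objs, stomata = get_stomata(max_proj, min_obj_size=200, max_obj_size=1000)
crops = [stomata[sl] for sl in obj_slices]   # one 2-D crop per detected object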