Python scipy.ndimage module: sum() code examples

We extracted the following 49 code examples from open-source Python projects to illustrate how to use scipy.ndimage.sum().
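
Before the project examples, here is a minimal, self-contained sketch of the call they all revolve around (the array below is illustrative, not taken from any project): ndimage.sum(input, labels, index) sums input over the regions defined by labels, returning one value per entry of index.

import numpy as np
from scipy import ndimage

a = np.array([[1, 2, 0, 0],
              [5, 3, 0, 4],
              [0, 0, 0, 7],
              [9, 3, 0, 0]])
labels, num_labels = ndimage.label(a > 0)                       # connected components
sums = ndimage.sum(a, labels, index=np.arange(num_labels + 1))  # one sum per label
print(sums)                                                     # [ 0. 11. 11. 12.] (label 0 is the background)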

Project: tissue_analysis    Author: VirtualPlants
def _neighbors_from_list_with_mask(self, labels, min_contact_area=None, real_area=True):
        if (self._neighbors is not None) and (sum(i in self._neighbors for i in labels) == len(labels)):
            result = dict([(i,self._neighbors[i]) for i in labels])
            if min_contact_area is None:
                return result
            else:
                return self._filter_with_area(result, min_contact_area, real_area)

        edges = {}
        for label in labels:
            try:
                slices = self.boundingbox(label)
                ex_slices = dilation(slices)
                mask_img = self.image[ex_slices]
            except Exception:
                mask_img = self.image
            neigh = list(contact_surface(mask_img,label))
            if min_contact_area is not None:
                neigh = self._neighbors_filtering_by_contact_area(label, neigh, min_contact_area, real_area)
            edges[label] = neigh

        return edges
Project: mriqc    Author: poldracklab
def _add_provenance(in_file, settings, air_msk, rot_msk):
    from mriqc import __version__ as version
    from niworkflows.nipype.utils.filemanip import hash_infile
    import nibabel as nb
    import numpy as np

    air_msk_size = nb.load(air_msk).get_data().astype(
        np.uint8).sum()
    rot_msk_size = nb.load(rot_msk).get_data().astype(
        np.uint8).sum()

    out_prov = {
        'md5sum': hash_infile(in_file),
        'version': version,
        'software': 'mriqc',
        'warnings': {
            'small_air_mask': bool(air_msk_size < 5e5),
            'large_rot_frame': bool(rot_msk_size > 500),
        }
    }

    if settings:
        out_prov['settings'] = settings

    return out_prov
Project: cellcomplex    Author: VirtualPlants
def property_topomesh_gaussian_smoothing_force(topomesh,gaussian_sigma=10.0):

    compute_topomesh_property(topomesh,'vertices',degree=1)
    edge_vertices = topomesh.wisp_property('vertices',degree=1).values()

    reversed_edge_vertices = np.transpose([edge_vertices[:,1],edge_vertices[:,0]])
    edge_vertices = np.append(edge_vertices,reversed_edge_vertices,axis=0)


    edge_vectors = topomesh.wisp_property('barycenter',degree=0).values(edge_vertices[:,1]) - topomesh.wisp_property('barycenter',degree=0).values(edge_vertices[:,0])
    edge_lengths = np.linalg.norm(edge_vectors,axis=1)
    gaussian_edge_lengths = np.exp(-np.power(edge_lengths,2.0)/np.power(gaussian_sigma,2.0))
    gaussian_edge_vectors = gaussian_edge_lengths[:,np.newaxis]*edge_vectors

    gaussian_force = np.transpose([nd.sum(gaussian_edge_vectors[:,d],edge_vertices[:,0],index=list(topomesh.wisps(0))) for d in [0,1,2]])
    vertices_weights = 1.+nd.sum(gaussian_edge_lengths,edge_vertices[:,0],index=list(topomesh.wisps(0)))
    gaussian_force = gaussian_force/vertices_weights[:,np.newaxis]

    return gaussian_force
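
The nd.sum calls above implement a scatter-add: a per-edge quantity is accumulated onto its source vertex by using the vertex ids as the labels array. A stripped-down sketch with made-up arrays:

import numpy as np
from scipy import ndimage as nd

edge_source = np.array([0, 0, 1, 2, 2])            # source vertex of each edge
edge_weight = np.array([1.0, 2.0, 0.5, 3.0, 1.5])  # per-edge quantity
per_vertex = nd.sum(edge_weight, edge_source, index=[0, 1, 2])
print(per_vertex)                                  # [3.  0.5 4.5]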
Project: cellcomplex    Author: VirtualPlants
def histo_plot(figure,X,color,xlabel="",ylabel="",cumul=False,bar=True,n_points=400,smooth_factor=0.1,spline_order=3,linewidth=3,alpha=1.0,label=""):
    if '%' in xlabel:
        magnitude = 100
        X_values = np.array(np.minimum(np.around(X),n_points+1),int)
    else:
        # magnitude = np.power(10,np.around(4*np.log10(X.mean()))/4+0.5)
        magnitude = np.power(10,np.around(4*np.log10(np.nanmean(X)+np.nanstd(X)+1e-7))/4+1)
        magnitude = np.around(magnitude,int(-np.log10(magnitude))+1)
        # print magnitude
        #magnitude = X.mean()+5.0*X.std()
        X_values = np.array(np.minimum(np.around(n_points*X[~np.isnan(X)]/magnitude),n_points+1),int)
    X_histo = np.zeros(n_points+1,float)
    for x in range(n_points+1):
        X_histo[x] = nd.sum(np.ones_like(X_values,float),X_values,index=x)
        if '%' in ylabel:
            X_histo[x] /= X_values.size/100.0
        if cumul:
            X_histo[x] += X_histo[x-1]

    if bar:
        bar_plot(figure,np.linspace(0,magnitude,n_points+1),X_histo,np.array([1,1,1]),color,xlabel,ylabel,label=label)
    else:
        smooth_plot(figure,np.linspace(0,magnitude,n_points+1),X_histo,color,color,xlabel,ylabel,n_points=n_points,smooth_factor=smooth_factor,spline_order=spline_order,linewidth=linewidth,alpha=alpha,label=label)
Project: SCaIP    Author: simonsfoundation
def com(A, d1, d2):
    """Calculation of the center of mass for spatial components
     Inputs:
     A:   np.ndarray
          matrix of spatial components (d x K)
     d1:  int
          number of pixels in x-direction
     d2:  int
          number of pixels in y-direction

     Output:
     cm:  np.ndarray
          center of mass for spatial components (K x 2)
    """
    nr = np.shape(A)[-1]
    Coor = dict()
    Coor['x'] = np.kron(np.ones((d2, 1)), np.expand_dims(range(d1), axis=1))
    Coor['y'] = np.kron(np.expand_dims(range(d2), axis=1), np.ones((d1, 1)))
    cm = np.zeros((nr, 2))        # vector for center of mass
    cm[:, 0] = np.dot(Coor['x'].T, A) / A.sum(axis=0)
    cm[:, 1] = np.dot(Coor['y'].T, A) / A.sum(axis=0)

    return cm
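
A hypothetical usage sketch of com above (the matrix A and grid size are invented; pixel i corresponds to x = i % d1, y = i // d1):

import numpy as np

A = np.zeros((12, 2))
A[1, 0] = 1.0        # component 0: all weight at pixel x=1, y=0 (d1=3, d2=4)
A[7, 1] = 1.0        # component 1: all weight at pixel x=1, y=2
print(com(A, 3, 4))  # [[1. 0.]
                     #  [1. 2.]]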
Project: ugali    Author: DarkEnergySurvey
def stellar_luminosity2(self, steps=10000):
        """
        DEPRECATED: ADW 2017-09-20

        Compute the stellar luminosity (L_Sol; average per star).
        Uses "sample" to generate mass sample and pdf.  The range of
        integration only covers the input isochrone data (no
        extrapolation used), but this seems like a sub-percent effect
        if the isochrone goes to 0.15 Msun for the old and metal-poor
        stellar populations of interest.

        Note that the stellar luminosity is very sensitive to the
        post-AGB population.
        """
        msg = "'%s.stellar_luminosity2': ADW 2017-09-20"%self.__class__.__name__
        import warnings
        warnings.warn(msg, DeprecationWarning)
        mass_init, mass_pdf, mass_act, mag_1, mag_2 = self.sample(mass_steps=steps)
        luminosity_interpolation = scipy.interpolate.interp1d(self.mass_init, self.luminosity,fill_value=0,bounds_error=False)
        luminosity = luminosity_interpolation(mass_init)
        return np.sum(luminosity * mass_pdf)

    # ADW: For temporary backward compatibility
Project: BirdCLEF2017    Author: kahst
def filter_isolated_cells(array, struct):

    filtered_array = np.copy(array)
    id_regions, num_ids = ndimage.label(filtered_array, structure=struct)
    id_sizes = np.array(ndimage.sum(array, id_regions, range(num_ids + 1)))
    area_mask = (id_sizes == 1)
    filtered_array[area_mask[id_regions]] = 0

    return filtered_array
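
A hypothetical usage sketch of filter_isolated_cells above (assuming the module's numpy/scipy imports): with an 8-connected structuring element, single-pixel regions are zeroed while larger components survive.

import numpy as np

arr = np.array([[1, 0, 0, 0],
                [0, 0, 1, 1],
                [0, 0, 1, 0],
                [1, 0, 0, 0]])
clean = filter_isolated_cells(arr, struct=np.ones((3, 3)))
# the lone pixels at (0,0) and (3,0) are removed; the 3-pixel blob remains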

#Decide if given spectrum shows bird sounds or noise only
Project: diluvian    Author: aschampion
def get_largest_component(self, closing_shape=None):
        mask, bounds = self._get_bounded_mask(closing_shape)

        label_im, num_labels = ndimage.label(mask)
        label_sizes = ndimage.sum(mask, label_im, range(num_labels + 1))
        label_im[(label_sizes < label_sizes.max())[label_im]] = 0
        label_im = np.minimum(label_im, 1)

        if label_im[tuple(self.seed - bounds[0])] == 0:
            logging.warning('Seed voxel ({}) is not in connected component.'.format(np.array_str(self.seed)))

        return label_im, bounds
Project: mriqc    Author: poldracklab
def _run_interface(self, runtime):
        in_file = nb.load(self.inputs.in_file)
        data = in_file.get_data()
        mask = np.zeros_like(data, dtype=np.uint8)
        mask[data <= 0] = 1

        # Pad one pixel to control behavior on borders of binary_opening
        mask = np.pad(mask, pad_width=(1,), mode='constant', constant_values=1)

        # Remove noise
        struc = nd.generate_binary_structure(3, 2)
        mask = nd.binary_opening(mask, structure=struc).astype(
            np.uint8)

        # Remove small objects
        label_im, nb_labels = nd.label(mask)
        if nb_labels > 2:
            sizes = nd.sum(mask, label_im, list(range(nb_labels + 1)))
            ordered = list(reversed(sorted(zip(sizes, list(range(nb_labels + 1))))))
            for _, label in ordered[2:]:
                mask[label_im == label] = 0

        # Un-pad
        mask = mask[1:-1, 1:-1, 1:-1]

        # If mask is small, clean-up
        if mask.sum() < 500:
            mask = np.zeros_like(mask, dtype=np.uint8)

        out_img = in_file.__class__(mask, in_file.affine, in_file.header)
        out_img.header.set_data_dtype(np.uint8)

        out_file = fname_presuffix(self.inputs.in_file,
                                   suffix='_rotmask', newpath='.')
        out_img.to_filename(out_file)
        self._results['out_file'] = out_file
        return runtime
Project: mriqc    Author: poldracklab
def artifact_mask(imdata, airdata, distance, zscore=10.):
    """Computes a mask of artifacts found in the air region"""
    from statsmodels.robust.scale import mad

    if not np.issubdtype(airdata.dtype, np.integer):
        airdata[airdata < .95] = 0
        airdata[airdata > 0.] = 1

    bg_img = imdata * airdata
    if np.sum((bg_img > 0).astype(np.uint8)) < 100:
        return np.zeros_like(airdata)

    # Find the background threshold (the most frequently occurring value
    # excluding 0)
    bg_location = np.median(bg_img[bg_img > 0])
    bg_spread = mad(bg_img[bg_img > 0])
    bg_img[bg_img > 0] -= bg_location
    bg_img[bg_img > 0] /= bg_spread

    # Apply this threshold to the background voxels to identify voxels
    # contributing artifacts.
    qi1_img = np.zeros_like(bg_img)
    qi1_img[bg_img > zscore] = 1
    qi1_img[distance < .10] = 0

    # Create a structural element to be used in an opening operation.
    struc = nd.generate_binary_structure(3, 1)
    qi1_img = nd.binary_opening(qi1_img, struc).astype(np.uint8)
    qi1_img[airdata <= 0] = 0

    return qi1_img
Project: mriqc    Author: poldracklab
def fuzzy_jaccard(in_tpms, in_mni_tpms):
    overlaps = []
    for tpm, mni_tpm in zip(in_tpms, in_mni_tpms):
        tpm = tpm.reshape(-1)
        mni_tpm = mni_tpm.reshape(-1)

        num = np.min([tpm, mni_tpm], axis=0).sum()
        den = np.max([tpm, mni_tpm], axis=0).sum()
        overlaps.append(float(num / den))
    return overlaps
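
A worked check of fuzzy_jaccard with made-up probability maps: the score is the sum of elementwise minima over the sum of elementwise maxima.

import numpy as np

tpm = np.array([0.2, 0.8, 0.5])
mni = np.array([0.1, 0.9, 0.5])
print(fuzzy_jaccard([tpm], [mni]))   # [0.875] = (0.1+0.8+0.5)/(0.2+0.9+0.5)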
Project: laplacian-meshes    Author: bmershon
def makeMask(self):
        #Narrow down to pixels which measured something every frame
        counts = np.sum(self.Zs > 0, 2)
        self.Mask = (counts == self.Zs.shape[2])
        #Find biggest connected component out of remaining pixels
        ILabel, NLabels = ndimage.label(self.Mask)
        idx = np.argmax(ndimage.sum(self.Mask, ILabel, range(NLabels+1)))
        self.Mask = (ILabel == idx)
        plt.imshow(self.Mask)
        plt.show()

    #Actually sets up all of the vertex, face, and adjacency structures in the 
    #PolyMeshObject
Project: cellcomplex    Author: VirtualPlants
def property_topomesh_area_smoothing_force(topomesh,target_areas=None):

    compute_topomesh_property(topomesh,'vertices',degree=2)
    compute_topomesh_property(topomesh,'length',degree=1)

    triangle_vertices = topomesh.wisp_property('vertices',degree=2).values()
    rotated_triangle_vertices = np.transpose([triangle_vertices[:,2],triangle_vertices[:,0],triangle_vertices[:,1]])
    antirotated_triangle_vertices = np.transpose([triangle_vertices[:,1],triangle_vertices[:,2],triangle_vertices[:,0]])
    triangle_vertices = np.append(np.append(triangle_vertices,rotated_triangle_vertices,axis=0),antirotated_triangle_vertices,axis=0)

    edge_index_list = np.array([[1, 2],[0, 1],[0, 2]])
    triangle_edge_vertices = triangle_vertices[:,edge_index_list]

    triangle_edge_vectors = topomesh.wisp_property('barycenter',degree=0).values(triangle_edge_vertices[...,1]) - topomesh.wisp_property('barycenter',degree=0).values(triangle_edge_vertices[...,0])
    #triangle_edge_lengths = np.power(np.sum(np.power(triangle_edge_vectors,2.0),axis=2),0.5)
    triangle_edge_lengths = np.linalg.norm(triangle_edge_vectors,axis=2)
    triangle_edge_directions = triangle_edge_vectors/triangle_edge_lengths[...,np.newaxis]

    triangle_perimeters = np.sum(triangle_edge_lengths,axis=1)
    triangle_areas = np.sqrt((triangle_perimeters/2.0)*(triangle_perimeters/2.0-triangle_edge_lengths[:,0])*(triangle_perimeters/2.0-triangle_edge_lengths[:,1])*(triangle_perimeters/2.0-triangle_edge_lengths[:,2]))
    triangle_areas[np.where(triangle_areas==0.0)] = 0.001

    if target_areas is None:
        target_areas = triangle_areas.mean()*np.ones(len(triangle_areas))
    else:
        target_areas = np.tile(target_areas,(3))

    area_edge_1_force = (target_areas-triangle_areas)*(triangle_edge_lengths[:,0]**2+triangle_edge_lengths[:,2]**2-triangle_edge_lengths[:,1]**2)*triangle_edge_lengths[:,1]
    area_edge_2_force = (target_areas-triangle_areas)*(triangle_edge_lengths[:,0]**2+triangle_edge_lengths[:,1]**2-triangle_edge_lengths[:,2]**2)*triangle_edge_lengths[:,2]

    area_unitary_force = -(area_edge_1_force[:,np.newaxis]*triangle_edge_directions[:,1] + area_edge_2_force[:,np.newaxis]*triangle_edge_directions[:,2])

    triangle_force = np.transpose([nd.sum(area_unitary_force[:,d],triangle_vertices[:,0],index=list(topomesh.wisps(0))) for d in xrange(3)])

    return triangle_force
Project: cellcomplex    Author: VirtualPlants
def property_topomesh_laplacian_smoothing_force(topomesh,cellwise_smoothing=False):

    if not topomesh.has_wisp_property('vertices',degree=1,is_computed=True):
        compute_topomesh_property(topomesh,'vertices',degree=1)
    edge_vertices = topomesh.wisp_property('vertices',degree=1).values()
    reversed_edge_vertices = np.transpose([edge_vertices[:,1],edge_vertices[:,0]])
    edge_vertices = np.append(edge_vertices,reversed_edge_vertices,axis=0)

    if not topomesh.has_wisp_property('valence',degree=0,is_computed=True):
        compute_topomesh_property(topomesh,'valence',degree=0)
    vertices_degrees = topomesh.wisp_property('valence',degree=0).values()

    if cellwise_smoothing:

        laplacian_force = np.zeros_like(topomesh.wisp_property('barycenter',degree=0).values(),np.float32)

        for c in topomesh.wisps(3):

            if not topomesh.has_wisp_property('vertices',degree=3,is_computed=True):
                compute_topomesh_property(topomesh,'vertices',degree=3)
            cell_vertices = topomesh.wisp_property('vertices',degree=3)[c]

            cell_edges = np.sum(nd.sum(np.ones_like(cell_vertices),cell_vertices,index=edge_vertices),axis=1)
            cell_edge_vertices = edge_vertices[np.where(cell_edges==2)[0]]
            cell_edge_vectors = topomesh.wisp_property('barycenter',degree=0).values(cell_edge_vertices[:,1]) - topomesh.wisp_property('barycenter',degree=0).values(cell_edge_vertices[:,0])

            cell_laplacian_force = np.transpose([nd.sum(cell_edge_vectors[:,0],cell_edge_vertices[:,0],index=cell_vertices),
                                                 nd.sum(cell_edge_vectors[:,1],cell_edge_vertices[:,0],index=cell_vertices),
                                                 nd.sum(cell_edge_vectors[:,2],cell_edge_vertices[:,0],index=cell_vertices)])
            laplacian_force[cell_vertices] += cell_laplacian_force/vertices_degrees[cell_vertices,np.newaxis]

    else:
        edge_vectors = topomesh.wisp_property('barycenter',degree=0).values(edge_vertices[:,1]) - topomesh.wisp_property('barycenter',degree=0).values(edge_vertices[:,0])

        laplacian_force = np.transpose([nd.sum(edge_vectors[:,d],edge_vertices[:,0],index=list(topomesh.wisps(0))) for d in [0,1,2]])
        laplacian_force = laplacian_force/vertices_degrees[:,np.newaxis]

    return laplacian_force
Project: cellcomplex    Author: VirtualPlants
def where_list(array,values):
    mask = nd.sum(np.ones_like(values),values,index=array)
    return np.where(mask>0)
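
Here nd.sum acts as a membership counter: for each element of array it counts that element's occurrences in values, so the returned indices point at the elements of array that appear in values. A tiny made-up example:

import numpy as np
from scipy import ndimage as nd

print(where_list(np.array([1, 2, 3, 4]), np.array([2, 4])))   # (array([1, 3]),)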
Project: cellcomplex    Author: VirtualPlants
def density_plot(figure,X,Y,color,xlabel="",ylabel="",n_points=10,linewidth=1,marker_size=40.,alpha=1.0,label=""):
    font = fm.FontProperties(family = 'Trebuchet', weight ='light')
    #font = fm.FontProperties(family = 'CenturyGothic',fname = '/Library/Fonts/Microsoft/Century Gothic', weight ='light')
    figure.patch.set_facecolor('white')
    axes = figure.add_subplot(111)
    # axes.plot(X,Y,linewidth=1,color=tuple(color2),alpha=0.2)
    # ratios = (Y-Y.min())/(Y.max()-Y.min())
    # X_min = X.mean()-3*X.std()
    # X_max = X.mean()+3*X.std()
    X_min = np.percentile(X,100/n_points)
    X_max = np.percentile(X,100 - 100/n_points)
    Y_min = np.percentile(Y,100/n_points)
    # Y_min = Y.mean()-3*Y.std()
    Y_max = np.percentile(Y,100 - 100/n_points)

    X_grid = np.linspace(X_min,X_max,n_points)
    Y_grid = np.linspace(Y_min,Y_max,n_points)

    X_sampled = X_grid[vq(X,X_grid)[0]]
    Y_sampled = Y_grid[vq(Y,Y_grid)[0]]

    point_density = {}
    for x in np.unique(X_sampled):
        point_count = nd.sum(np.ones_like(np.where(X_sampled==x)),Y_sampled[np.where(X_sampled==x)],index=np.unique(Y_sampled))
        for i,y in enumerate(np.unique(Y_sampled)):
            point_density[(x,y)] = point_count[i]/len(Y)

    point_area = np.array([np.pi*10.0*marker_size*point_density[(x,y)]/np.array(point_density.values()).max() for x,y in zip(X_sampled,Y_sampled)])
    #colors = np.random.rand(len(X))
    colors = np.array([point_density[(x,y)]/np.array(point_density.values()).max() * color for x,y in zip(X_sampled,Y_sampled)])
    colors += np.array([(1-point_density[(x,y)]/np.array(point_density.values()).max()) * np.ones(3) for x,y in zip(X_sampled,Y_sampled)])

    axes.scatter(X_sampled,Y_sampled,s=point_area,c=colors,linewidth=linewidth,alpha=alpha,label=label)
    axes.set_xlim(X_min,X_max)
    axes.set_xlabel(xlabel,fontproperties=font, size=10, style='italic')
    axes.set_xticklabels(axes.get_xticks(),fontproperties=font, size=12)
    axes.set_ylim(Y_min,Y_max)
    axes.set_ylabel(ylabel, fontproperties=font, size=10, style='italic')
    axes.set_yticklabels(axes.get_yticks(),fontproperties=font, size=12)
Project: SCaIP    Author: simonsfoundation
def order_components(A, C):
    """Order components based on their maximum temporal value and size

    Parameters
    -----------
    A:   sparse matrix (d x K)
         spatial components
    C:   matrix or np.ndarray (K x T)
         temporal components

    Returns
    -------
    A_or:  np.ndarray
        ordered spatial components
    C_or:  np.ndarray
        ordered temporal components
    srt:   np.ndarray
        sorting mapping

    """
    A = np.array(A.todense())
    nA2 = np.sqrt(np.sum(A**2, axis=0))
    K = len(nA2)
    A = np.array(np.matrix(A) * spdiags(1 / nA2, 0, K, K))
    nA4 = np.sum(A**4, axis=0)**0.25
    C = np.array(spdiags(nA2, 0, K, K) * np.matrix(C))
    mC = np.ndarray.max(np.array(C), axis=1)
    srt = np.argsort(nA4 * mC)[::-1]
    A_or = A[:, srt] * spdiags(nA2[srt], 0, K, K)
    C_or = spdiags(1. / nA2[srt], 0, K, K) * (C[srt, :])

    return A_or, C_or, srt
Project: SCaIP    Author: simonsfoundation
def extract_DF_F(Y, A, C, i=None):
    """Extract DF/F values from spatial/temporal components and background

     Parameters
     -----------
     Y: np.ndarray
           input data (d x T)
     A: sparse matrix of np.ndarray
           Set of spatial including spatial background (d x K)
     C: matrix
           Set of temporal components including background (K x T)

     Returns
     -----------
     C_df: matrix
          temporal components in the DF/F domain
     Df:  np.ndarray
          vector with baseline values for each trace
    """
    A2 = A.copy()
    A2.data **= 2
    nA2 = np.squeeze(np.array(A2.sum(axis=0)))
    A = A * diags(1 / nA2, 0)
    C = diags(nA2, 0) * C

    # if i is None:
    #    i = np.argmin(np.max(A,axis=0))

    Y = np.matrix(Y)
    Yf = A.transpose() * (Y - A * C)  # + A[:,i]*C[i,:])
    Df = np.median(np.array(Yf), axis=1)
    C_df = diags(1 / Df, 0) * C

    return C_df, Df
Project: SCaIP    Author: simonsfoundation
def find_matches(D_s, print_assignment=False):

    matches=[]
    costs=[]
    t_start=time.time()
    for ii,D in enumerate(D_s):
        DD=D.copy()    
        if np.any(np.isnan(DD)):
            raise Exception('Distance Matrix contains NaN, not allowed!')


    #    indexes = m.compute(DD)
#        indexes = linear_assignment(DD)
        indexes = linear_sum_assignment(DD)
        indexes2=[(ind1,ind2) for ind1,ind2 in zip(indexes[0],indexes[1])]
        matches.append(indexes)
        DD=D.copy()   
        total = []
        for row, column in indexes2:
            value = DD[row,column]
            if print_assignment:
                print '(%d, %d) -> %f' % (row, column, value)
            total.append(value)      
        print  'FOV: %d, shape: %d,%d total cost: %f' % (ii, DD.shape[0],DD.shape[1], np.sum(total))
        print time.time()-t_start
        costs.append(total)      

    return matches,costs

#%%
Project: imagepy    Author: Image-Py
def run(self, ips, snap, img, para = None):
        intenimg = WindowsManager.get(para['inten']).ips.img
        strc = ndimage.generate_binary_structure(2, 1 if para['con']=='4-connect' else 2)
        buf, n = ndimage.label(snap, strc, output=np.uint16)
        index = range(1, n+1)
        idx = (np.ones(n+1)*para['front']).astype(np.uint8)
        msk = np.ones(n, dtype=bool)

        if para['mean']>0: msk *= ndimage.mean(intenimg, buf, index)>=para['mean']
        if para['mean']<0: msk *= ndimage.mean(intenimg, buf, index)<-para['mean']
        if para['max']>0: msk *= ndimage.maximum(intenimg, buf, index)>=para['max']
        if para['max']<0: msk *= ndimage.maximum(intenimg, buf, index)<-para['max']
        if para['min']>0: msk *= ndimage.minimum(intenimg, buf, index)>=para['min']
        if para['min']<0: msk *= ndimage.minimum(intenimg, buf, index)<-para['min']
        if para['sum']>0: msk *= ndimage.sum(intenimg, buf, index)>=para['sum']
        if para['sum']<0: msk *= ndimage.sum(intenimg, buf, index)<-para['sum']
        if para['std']>0: msk *= ndimage.standard_deviation(intenimg, buf, index)>=para['std']
        if para['std']<0: msk *= ndimage.standard_deviation(intenimg, buf, index)<-para['std']


        xy = ndimage.center_of_mass(intenimg, buf, index)
        xy = np.array(xy).round(2).T

        idx[1:][~msk] = para['back']
        idx[0] = 0
        img[:] = idx[buf]

        WindowsManager.get(para['inten']).ips.mark = RGMark((xy.T, msk))
        WindowsManager.get(para['inten']).ips.update = True
Project: imagepy    Author: Image-Py
def run(self, ips, snap, img, para = None):
        ips.lut = self.buflut
        lab, n = ndimg.label(snap>para['thr2'], np.ones((3,3)), output=np.uint16)
        sta = ndimg.sum(snap>para['thr1'], lab, index=range(n+1)) > 0
        img[:] = (sta*255)[lab]
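
The two thresholds above form a hysteresis: connected components of the snap > thr2 mask are kept only if they contain at least one snap > thr1 pixel. A standalone sketch with hypothetical data and thresholds:

import numpy as np
from scipy import ndimage as ndimg

img = np.random.rand(64, 64)
low, high = 0.6, 0.9                              # hypothetical thresholds
lab, n = ndimg.label(img > low, np.ones((3, 3)))
keep = ndimg.sum(img > high, lab, index=np.arange(n + 1)) > 0
result = keep[lab]                                # True on components containing a strong pixel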
Project: bates_galaxies_lab    Author: aleksds
def _centroid(x, y, density):
    """
    Computes weighted centroid of one bin.
    Equation (4) of Cappellari & Copin (2003)

    """
    mass = np.sum(density)
    xBar = x.dot(density)/mass
    yBar = y.dot(density)/mass

    return xBar, yBar

#----------------------------------------------------------------------
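
A worked check of _centroid with made-up bin data; the result is simply the density-weighted mean of the coordinates.

import numpy as np

x = np.array([0., 1., 2.])
y = np.array([0., 1., 0.])
density = np.array([1., 1., 2.])
print(_centroid(x, y, density))   # xBar = (0*1 + 1*1 + 2*2)/4 = 1.25, yBar = 0.25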
Project: procrustes    Author: bmershon
def makeMask(self):
        #Narrow down to pixels which measured something every frame
        counts = np.sum(self.Zs > 0, 2)
        self.Mask = (counts == self.Zs.shape[2])
        #Find biggest connected component out of remaining pixels
        ILabel, NLabels = ndimage.label(self.Mask)
        idx = np.argmax(ndimage.sum(self.Mask, ILabel, range(NLabels+1)))
        self.Mask = (ILabel == idx)
        plt.imshow(self.Mask)
        plt.show()

    #Actually sets up all of the vertex, face, and adjacency structures in the 
    #PolyMeshObject
Project: ugali    Author: DarkEnergySurvey
def stellar_mass(self, mass_min=0.1, steps=10000):
        """
        Compute the stellar mass (Msun; average per star). PDF comes
        from IMF, but weight by actual stellar mass.

        Parameters:
        -----------
        mass_min : Minimum mass to integrate the IMF
        steps    : Number of steps to sample the isochrone

        Returns:
        --------
        mass     : Stellar mass [Msun]
        """
        mass_max = self.mass_init_upper_bound

        d_log_mass = (np.log10(mass_max) - np.log10(mass_min)) / float(steps)
        log_mass = np.linspace(np.log10(mass_min), np.log10(mass_max), steps)
        mass = 10.**log_mass

        if mass_min < np.min(self.mass_init):
            mass_act_interpolation = scipy.interpolate.interp1d(np.insert(self.mass_init, 0, mass_min),
                                                                np.insert(self.mass_act, 0, mass_min))
        else:
           mass_act_interpolation = scipy.interpolate.interp1d(self.mass_init, self.mass_act) 

        mass_act = mass_act_interpolation(mass)
        return np.sum(mass_act * d_log_mass * self.imf.pdf(mass, log_mode=True))
Project: ugali    Author: DarkEnergySurvey
def stellar_luminosity(self, steps=10000):
        """
        Compute the stellar luminosity (Lsun; average per star). PDF
        comes from IMF.  The range of integration only covers the
        input isochrone data (no extrapolation used), but this seems
        like a sub-percent effect if the isochrone goes to 0.15 Msun
        for the old and metal-poor stellar populations of interest.

        Note that the stellar luminosity is very sensitive to the
        post-AGB population.

        Parameters:
        -----------
        steps : Number of steps to sample the isochrone.

        Returns:
        --------
        lum   : The stellar luminosity [Lsun]
        """
        mass_min = np.min(self.mass_init)
        mass_max = self.mass_init_upper_bound

        d_log_mass = (np.log10(mass_max) - np.log10(mass_min)) / float(steps)
        log_mass = np.linspace(np.log10(mass_min), np.log10(mass_max), steps)
        mass = 10.**log_mass

        luminosity_interpolation = scipy.interpolate.interp1d(self.mass_init, self.luminosity,fill_value=0,bounds_error=False)
        luminosity = luminosity_interpolation(mass)

        return np.sum(luminosity * d_log_mass * self.imf.pdf(mass, log_mode=True))
Project: ugali    Author: DarkEnergySurvey
def absolute_magnitude(self, richness=1, steps=1e4):
        """
        Calculate the absolute magnitude (Mv) by integrating the
        stellar luminosity.

        Parameters:
        -----------
        richness : isochrone normalization parameter
        steps    : number of isochrone sampling steps

        Returns:
        --------
        abs_mag : Absolute magnitude (Mv)
        """
        # Using the SDSS g,r -> V from Jester 2005 [arXiv:0506022]
        # for stars with R-I < 1.15
        # V = g_sdss - 0.59(g_sdss-r_sdss) - 0.01
        # g_des = g_sdss - 0.104(g_sdss - r_sdss) + 0.01
        # r_des = r_sdss - 0.102(g_sdss - r_sdss) + 0.02
        if self.survey.lower() != 'des':
            raise Exception('Only valid for DES')
        if 'g' not in [self.band_1,self.band_2]:
            msg = "Need g-band for absolute magnitude"
            raise Exception(msg)    
        if 'r' not in [self.band_1,self.band_2]:
            msg = "Need r-band for absolute magnitude"
            raise Exception(msg)    

        mass_init,mass_pdf,mass_act,mag_1,mag_2=self.sample(mass_steps = steps)
        g,r = (mag_1,mag_2) if self.band_1 == 'g' else (mag_2,mag_1)

        V = g - 0.487*(g - r) - 0.0249
        flux = np.sum(mass_pdf*10**(-V/2.5))
        Mv = -2.5*np.log10(richness*flux)
        return Mv
Project: ugali    Author: DarkEnergySurvey
def observableFractionCMDX(self, mask, distance_modulus, mass_min=0.1):
        """
        Compute observable fraction of stars with masses greater than mass_min in each 
        pixel in the interior region of the mask.

        ADW: Careful, this function is fragile! The selection here should
             be the same as mask.restrictCatalogToObservable space. However,
             for technical reasons it is faster to do the calculation with
             broadcasting here.
        ADW: Could this function be even faster / more readable?
        ADW: Should this include magnitude error leakage?
        """
        mass_init_array,mass_pdf_array,mass_act_array,mag_1_array,mag_2_array = self.sample(mass_min=mass_min,full_data_range=False)
        mag = mag_1_array if self.band_1_detection else mag_2_array
        color = mag_1_array - mag_2_array

        # ADW: Only calculate observable fraction over interior pixels...
        pixels = mask.roi.pixels_interior
        mag_1_mask = mask.mask_1.mask_roi_sparse[mask.roi.pixel_interior_cut]
        mag_2_mask = mask.mask_2.mask_roi_sparse[mask.roi.pixel_interior_cut]

        # ADW: Restrict mag and color to range of mask with sufficient solid angle
        cmd_cut = ugali.utils.binning.take2D(mask.solid_angle_cmd,color,mag+distance_modulus,
                                             mask.roi.bins_color, mask.roi.bins_mag) > 0
        # Pre-apply these cuts to the 1D mass_pdf_array to save time
        mass_pdf_cut = mass_pdf_array*cmd_cut

        # Create 2D arrays of cuts for each pixel
        mask_1_cut = (mag_1_array+distance_modulus)[:,np.newaxis] < mag_1_mask
        mask_2_cut = (mag_2_array+distance_modulus)[:,np.newaxis] < mag_2_mask
        mask_cut_repeat = mask_1_cut & mask_2_cut

        observable_fraction = (mass_pdf_cut[:,np.newaxis]*mask_cut_repeat).sum(axis=0)
        return observable_fraction
Project: ugali    Author: DarkEnergySurvey
def observableFractionMMD(self, mask, distance_modulus, mass_min=0.1):
        # This can be done faster...
        logger.info('Calculating observable fraction from MMD')

        mmd = self.signalMMD(mask,distance_modulus)
        obs_frac = mmd.sum(axis=-1).sum(axis=-1)[mask.mask_roi_digi[mask.roi.pixel_interior_cut]]
        return obs_frac
Project: ugali    Author: DarkEnergySurvey
def absolute_magnitude(distance_modulus,g,r,prob=None):
    """ Calculate the absolute magnitude from a set of bands """
    V = g - 0.487*(g - r) - 0.0249

    flux = np.sum(10**(-(V-distance_modulus)/2.5))
    Mv = -2.5*np.log10(flux)
    return Mv
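
A worked check with made-up inputs; for a single star the expression reduces to Mv = V - distance_modulus.

import numpy as np

g, r = np.array([20.0]), np.array([19.5])
# V = 20.0 - 0.487*0.5 - 0.0249 = 19.7316, so Mv = 19.7316 - 17.0 = 2.7316
print(absolute_magnitude(17.0, g, r))   # ~2.73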
Project: tissue_analysis    Author: VirtualPlants
def compute_image_property(self, property_name, min_contact_area=None, sub_factor=8.):
        """
        """
        computable_properties = ['barycenter','volume','neighborhood_size','layer','mean_curvature','gaussian_curvature']
        try:
            assert property_name in computable_properties
        except AssertionError:
            print "Property \""+property_name+"\" cannot be computed on image"
            print "Try with one of the following:"
            for p in computable_properties:
                print "  * "+p
        else:
            if self._image is not None:
                if property_name in ['barycenter','volume']:
                    graph = graph_from_image(self._image,background=self.background,spatio_temporal_properties=[property_name],ignore_cells_at_stack_margins=self.ignore_cells_at_stack_margins)
                    property_dict = graph.vertex_property(property_name)
                elif property_name == 'neighborhood_size':
                    neighbors = [self.image_graph.neighbors(l) for l in self.labels]
                    property_dict = dict(zip(self.labels,map(len,neighbors)))
                elif property_name == 'layer':
                    if min_contact_area is None:
                        min_contact_area = self.min_contact_area
                    graph = graph_from_image(self._image,background=self.background,spatio_temporal_properties=['L1'],ignore_cells_at_stack_margins=self.ignore_cells_at_stack_margins, min_contact_area=min_contact_area)
                    first_layer = graph.vertex_property('L1')
                    second_layer_cells = [v for v in graph.vertices() if np.any([first_layer[n] for n in graph.neighbors(v)]) and not first_layer[v]]
                    second_layer = dict(zip(list(graph.vertices()),[v in second_layer_cells for v in graph.vertices()]))
                    property_dict = dict(zip(self.labels,[1 if first_layer[l] else 2 if second_layer[l] else 3 for l in self.labels]))
                elif property_name in ['mean_curvature','gaussian_curvature']:
                    if not self.has_image_property('barycenter'):
                        self.compute_image_property('barycenter')
                    if not self.has_image_property('layer'):
                        print "--> Computing layer property"
                        self.compute_image_property('layer')

                    cell_centers = self.image_property('barycenter')
                    L1_cells = self.labels[self.image_property('layer').values()==1]

                    from openalea.cellcomplex.property_topomesh.utils.implicit_surfaces import implicit_surface_topomesh
                    from openalea.cellcomplex.property_topomesh.property_topomesh_analysis import compute_topomesh_property, compute_topomesh_vertex_property_from_faces
                    from openalea.cellcomplex.property_topomesh.property_topomesh_optimization import property_topomesh_vertices_deformation, topomesh_triangle_split

                    sub_factor = int(sub_factor)  # default 8. is a float, not a valid slice step
                    sub_binary_image = (self._image!=self.background).astype(float)[::sub_factor,::sub_factor,::sub_factor]
                    surface_topomesh = implicit_surface_topomesh(sub_binary_image,np.array(sub_binary_image.shape),sub_factor*np.array(self._image.voxelsize),center=False)
                    property_topomesh_vertices_deformation(surface_topomesh,iterations=10)

                    compute_topomesh_property(surface_topomesh,'barycenter',2)
                    compute_topomesh_property(surface_topomesh,'normal',2,normal_method='orientation')

                    compute_topomesh_vertex_property_from_faces(surface_topomesh,'normal',adjacency_sigma=2,neighborhood=5)
                    compute_topomesh_property(surface_topomesh,'mean_curvature',2)
                    compute_topomesh_vertex_property_from_faces(surface_topomesh,property_name,adjacency_sigma=2,neighborhood=5)

                    surface_cells = L1_cells[vq(surface_topomesh.wisp_property('barycenter',0).values(),cell_centers.values(L1_cells))[0]]
                    surface_topomesh.update_wisp_property('label',0,array_dict(surface_cells,list(surface_topomesh.wisps(0))))

                    L1_cell_property = nd.sum(surface_topomesh.wisp_property(property_name,0).values(),surface_cells,index=L1_cells)/nd.sum(np.ones_like(surface_cells),surface_cells,index=L1_cells)
                    L1_cell_property = array_dict(L1_cell_property,L1_cells)    
                    property_dict = array_dict([L1_cell_property[l] if (l in L1_cells) and (not np.isnan(L1_cell_property[l])) else 0 for l in self.labels],self.labels)

                property_data = [property_dict[l] for l in self.labels]
                self.update_image_property(property_name,property_data)
Project: tissue_analysis    Author: VirtualPlants
def volume(self, labels = None, real = True):
        """
        Return the volume of the labels.

        Args:
           labels: (int) - single label number or a sequence of
            label numbers of the objects to be measured.
            If labels is None, all labels are used.

           real: (bool) - If real = True, volume is in real-world units else in voxels.

        :Examples:

        >>> import numpy as np
        >>> a = np.array([[1, 2, 7, 7, 1, 1],
                          [1, 6, 5, 7, 3, 3],
                          [2, 2, 1, 7, 3, 3],
                          [1, 1, 1, 4, 1, 1]])

        >>> from vplants.tissue_analysis.spatial_image_analysis import SpatialImageAnalysis
        >>> analysis = SpatialImageAnalysis(a)

        >>> analysis.volume(7)
        4.0

        >>> analysis.volume([7,2])
        [4.0, 3.0]

        >>> analysis.volume()
        [10.0, 3.0, 4.0, 1.0, 1.0, 1.0, 4.0]
        """
        # Check the provided `labels`:
        labels = self.label_request(labels)

        volume = nd.sum(np.ones_like(self.image), self.image, index=np.int16(labels))
        # convert to real-world units if asked:
        if real is True:
            if self.image.ndim == 2:
                volume = np.multiply(volume,(self._voxelsize[0]*self._voxelsize[1]))
            elif self.image.ndim == 3:
                volume = np.multiply(volume,(self._voxelsize[0]*self._voxelsize[1]*self._voxelsize[2]))
            volume = volume.tolist()

        if not isinstance(labels, int):
            return self.convert_return(volume, labels)
        else:
            return volume
Project: tissue_analysis    Author: VirtualPlants
def cells_voxel_layer(self, labels, region_boundingbox = False, single_frame = False):
        """
        This function extracts the first layer of voxels surrounding the cells defined by `labels`.
        Args:
           labels: (int|list) - cell label(s) for which we want to extract the first layer of voxels;
           region_boundingbox: (bool) - if True, consider a bounding box surrounding all labels, instead of each label alone.
           single_frame: (bool) - if True, return a single array with all voxel positions defining cell walls.
        :Output:
         returns a binary image: 1 where the cell-label of interest is, 0 elsewhere
        """
        if isinstance(labels,int):
            labels = [labels]
        if single_frame:
            region_boundingbox=True

        if not isinstance(region_boundingbox,bool):
            if sum([isinstance(s,slice) for s in region_boundingbox])==3:
                bbox = region_boundingbox
            else:
                print "TypeError: Wong type for 'region_boundingbox', should either be bool or la tuple of slices"
                return None
        elif isinstance(region_boundingbox,bool) and region_boundingbox:
            bbox = self.region_boundingbox(labels)
        else:
            bboxes = self.boundingbox(labels, real=False)

        # Generate the smaller eroding structure possible:
        struct = nd.generate_binary_structure(3, 2)
        if single_frame:
            vox_layer = np.zeros_like(self.image[bbox], dtype=int)
        else:
            vox_layer = {}
        for clabel in labels:
            if region_boundingbox:
                bbox_im = self.image[bbox]
            else:
                bbox_im = self.image[bboxes[clabel]]
            # Creating a mask (1 where the cell-label of interest is, 0 elsewhere):
            mask_bbox_im = (bbox_im == clabel)
            # Erode the cell using the structure:
            eroded_mask_bbox_im = nd.binary_erosion(mask_bbox_im, structure=struct)
            if single_frame:
                vox_layer += np.array(mask_bbox_im - eroded_mask_bbox_im, dtype=int)
            else:
                vox_layer[clabel] = np.array(mask_bbox_im - eroded_mask_bbox_im, dtype=int)

        if len(labels)==1:
            return vox_layer[clabel]
        else:
            return vox_layer
Project: tissue_analysis    Author: VirtualPlants
def geometric_median(X, numIter = 200):
    """
    Compute the geometric median of a point sample.
    The geometric median coordinates will be expressed in the Spatial Image reference system (not in real world metrics).
    We use Weiszfeld's algorithm (http://en.wikipedia.org/wiki/Geometric_median)

    Args:
       X: (list|np.array) - voxels coordinate (3xN matrix)
       numIter: (int) - limit the length of the search for global optimum

    Returns:
     - np.array((x,y,z)): geometric median of the coordinates;
    """
    # -- Initialising 'median' to the centroid
    y = np.mean(X,1)
    # -- If the init point is in the set of points, we shift it:
    while (y[0] in X[0]) and (y[1] in X[1]) and (y[2] in X[2]):
        y+=0.1

    convergence=False # boolean testing the convergence toward a global optimum
    dist=[] # list recording the distance evolution

    # -- Minimizing the sum of the squares of the distances between each points in 'X' and the median.
    i=0
    while ( (not convergence) and (i < numIter) ):
        num_x, num_y, num_z = 0.0, 0.0, 0.0
        denum = 0.0
        m = X.shape[1]
        d = 0
        for j in range(0,m):
            div = math.sqrt( (X[0,j]-y[0])**2 + (X[1,j]-y[1])**2 + (X[2,j]-y[2])**2 )
            num_x += X[0,j] / div
            num_y += X[1,j] / div
            num_z += X[2,j] / div
            denum += 1./div
            d += div**2 # distance (to the median) to minimize
        dist.append(d) # update of the distance evolution

        if denum == 0.:
            warnings.warn( "Couldn't compute a geometric median, please check your data!" )
            return [0,0,0]

        y = [num_x/denum, num_y/denum, num_z/denum] # update to the new value of the median
        if i > 3:
            convergence=(abs(dist[i]-dist[i-2])<0.1) # we test the convergence over three steps for stability
            #~ print abs(dist[i]-dist[i-2]), convergence
        i += 1
    if i == numIter:
        raise ValueError( "The Weiszfeld's algoritm did not converged after"+str(numIter)+"iterations !!!!!!!!!" )
    # -- When convergence or iterations limit is reached we assume that we found the median.

    return np.array(y)
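
A hypothetical usage sketch (assuming the numpy/math imports of the original module): four points at the corners of a unit square, given as a 3xN array, have their geometric median at the center of the square.

import numpy as np

X = np.array([[0., 0., 1., 1.],
              [0., 1., 0., 1.],
              [0., 0., 0., 0.]])
print(geometric_median(X))   # [0.5 0.5 0. ]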
Project: BirdCLEF2017    Author: kahst
def hasBird(spec, threshold=16):

    #working copy
    img = spec.copy()

    #STEP 1: Median blur
    img = cv2.medianBlur(img,5)

    #STEP 2: Median threshold
    col_median = np.median(img, axis=0, keepdims=True)
    row_median = np.median(img, axis=1, keepdims=True)

    img[img < row_median * 3] = 0
    img[img < col_median * 4] = 0
    img[img > 0] = 1

    #STEP 3: Remove singles
    img = filter_isolated_cells(img, struct=np.ones((3,3)))

    #STEP 4: Morph Closing
    img = cv2.morphologyEx(img, cv2.MORPH_CLOSE, np.ones((5,5), np.float32))

    #STEP 5: Frequency crop
    img = img[128:-16, :]

    #STEP 6: Count columns and rows with signal
    #(Note: We only use rows with signal as threshold, but columns might come in handy in other scenarios)

    #column has signal?
    col_max = np.max(img, axis=0)
    col_max = ndimage.morphology.binary_dilation(col_max, iterations=2).astype(col_max.dtype)
    cthresh = col_max.sum()

    #row has signal?
    row_max = np.max(img, axis=1)
    row_max = ndimage.morphology.binary_dilation(row_max, iterations=2).astype(row_max.dtype)
    rthresh = row_max.sum()

    #final threshold
    thresh = rthresh

    #DBUGB: show?
    #print thresh
    #cv2.imshow('BIRD?', img)
    #cv2.waitKey(-1)

    #STEP 7: Apply threshold (Default = 16)
    bird = True
    if thresh < threshold:
        bird = False

    return bird, thresh

######################################################

#list all bird species
Project: mriqc    Author: poldracklab
def gradient_threshold(in_file, in_segm, thresh=1.0, out_file=None):
    """ Compute a threshold from the histogram of the magnitude gradient image """
    import os.path as op
    import numpy as np
    import nibabel as nb
    from scipy import ndimage as sim

    struc = sim.iterate_structure(sim.generate_binary_structure(3, 2), 2)

    if out_file is None:
        fname, ext = op.splitext(op.basename(in_file))
        if ext == '.gz':
            fname, ext2 = op.splitext(fname)
            ext = ext2 + ext
        out_file = op.abspath('{}_gradmask{}'.format(fname, ext))

    imnii = nb.load(in_file)

    hdr = imnii.get_header().copy()
    hdr.set_data_dtype(np.uint8)  # pylint: disable=no-member

    data = imnii.get_data().astype(np.float32)

    mask = np.zeros_like(data, dtype=np.uint8)  # pylint: disable=no-member
    mask[data > 15.] = 1

    segdata = nb.load(in_segm).get_data().astype(np.uint8)
    segdata[segdata > 0] = 1
    segdata = sim.binary_dilation(segdata, struc, iterations=2, border_value=1).astype(np.uint8)  # pylint: disable=no-member
    mask[segdata > 0] = 1

    mask = sim.binary_closing(mask, struc, iterations=2).astype(np.uint8)  # pylint: disable=no-member
    # Remove small objects
    label_im, nb_labels = sim.label(mask)
    artmsk = np.zeros_like(mask)
    if nb_labels > 2:
        sizes = sim.sum(mask, label_im, list(range(nb_labels + 1)))
        ordered = list(reversed(sorted(zip(sizes, list(range(nb_labels + 1))))))
        for _, label in ordered[2:]:
            mask[label_im == label] = 0
            artmsk[label_im == label] = 1

    mask = sim.binary_fill_holes(mask, struc).astype(np.uint8)  # pylint: disable=no-member

    nb.Nifti1Image(mask, imnii.get_affine(), hdr).to_filename(out_file)
    return out_file
Project: laplacian-meshes    Author: bmershon
def makeMesh(self):
        if self.Xs.shape[2] == 0:
            return
        X = self.Xs[:, :, 0]
        Y = self.Ys[:, :, 0]
        Z = self.Zs[:, :, 0]
        mesh = PolyMesh()
        #Come up with vertex indices in the mask
        Mask = np.array(self.Mask, dtype=np.int32)
        nV = np.sum(Mask)
        Mask[self.Mask > 0] = np.arange(nV) + 1
        Mask = Mask - 1
        VPos = np.zeros((nV, 3))
        VPos[:, 0] = X[self.Mask > 0]
        VPos[:, 1] = Y[self.Mask > 0]
        VPos[:, 2] = -Z[self.Mask > 0]
        #Add lower right triangle
        v1 = Mask[0:-1, 0:-1].flatten()
        v2 = Mask[1:, 0:-1].flatten()
        v3 = Mask[1:, 1:].flatten()
        N = v1.size
        ITris1 = np.concatenate((np.reshape(v1, [N, 1]), np.reshape(v2, [N, 1]), np.reshape(v3, [N, 1])), 1)
        #Add upper right triangle
        v1 = Mask[0:-1, 0:-1].flatten()
        v2 = Mask[1:, 1:].flatten()
        v3 = Mask[0:-1, 1:].flatten()
        N = v1.size
        ITris2 = np.concatenate((np.reshape(v1, [N, 1]), np.reshape(v2, [N, 1]), np.reshape(v3, [N, 1])), 1)
        ITris = np.concatenate((ITris1, ITris2), 0)
        #Only retain triangles which have all three points
        ITris = ITris[np.sum(ITris == -1, 1) == 0, :]
        mesh.VPos = VPos
        mesh.ITris = ITris
        mesh.VColors = 0.5*np.ones(mesh.VPos.shape)
        mesh.updateNormalBuffer()
        mesh.VPosVBO = vbo.VBO(np.array(mesh.VPos, dtype=np.float32))
        mesh.VNormalsVBO = vbo.VBO(np.array(mesh.VNormals, dtype=np.float32))
        mesh.VColorsVBO = vbo.VBO(np.array(mesh.VColors, dtype=np.float32))
        mesh.IndexVBO = vbo.VBO(mesh.ITris, target=GL_ELEMENT_ARRAY_BUFFER)
        mesh.needsDisplayUpdate = False
        self.mesh = mesh

    #Compute the delta coordinates and solvers for all frames of the video
Project: cellcomplex    Author: VirtualPlants
def property_topomesh_cotangent_laplacian_smoothing_force(topomesh):

    compute_topomesh_property(topomesh,'vertices',degree=2)
    compute_topomesh_property(topomesh,'length',degree=1)

    triangle_vertices = topomesh.wisp_property('vertices',degree=2).values(list(topomesh.wisps(2)))
    rotated_triangle_vertices = np.transpose([triangle_vertices[:,2],triangle_vertices[:,0],triangle_vertices[:,1]])
    antirotated_triangle_vertices = np.transpose([triangle_vertices[:,1],triangle_vertices[:,2],triangle_vertices[:,0]])
    triangle_vertices = np.append(np.append(triangle_vertices,rotated_triangle_vertices,axis=0),antirotated_triangle_vertices,axis=0)

    edge_index_list = np.array([[1, 2],[0, 2],[0, 1]])
    triangle_edge_vertices = triangle_vertices[:,edge_index_list]

    triangle_edge_vectors = topomesh.wisp_property('barycenter',degree=0).values(triangle_edge_vertices[...,1]) - topomesh.wisp_property('barycenter',degree=0).values(triangle_edge_vertices[...,0])
    #triangle_edge_lengths = np.power(np.sum(np.power(triangle_edge_vectors,2.0),axis=2),0.5)
    triangle_edge_lengths = np.linalg.norm(triangle_edge_vectors,axis=2)
    triangle_edge_directions = triangle_edge_vectors/triangle_edge_lengths[...,np.newaxis]

    triangle_perimeters = np.sum(triangle_edge_lengths,axis=1)
    triangle_areas = np.sqrt((triangle_perimeters/2.0)*(triangle_perimeters/2.0-triangle_edge_lengths[:,0])*(triangle_perimeters/2.0-triangle_edge_lengths[:,1])*(triangle_perimeters/2.0-triangle_edge_lengths[:,2]))

    triangle_cosines = np.zeros_like(triangle_edge_lengths,np.float32)
    triangle_cosines[:,0] = (triangle_edge_lengths[:,1]**2+triangle_edge_lengths[:,2]**2-triangle_edge_lengths[:,0]**2)/(2.0*triangle_edge_lengths[:,1]*triangle_edge_lengths[:,2])
    triangle_cosines[:,1] = (triangle_edge_lengths[:,2]**2+triangle_edge_lengths[:,0]**2-triangle_edge_lengths[:,1]**2)/(2.0*triangle_edge_lengths[:,2]*triangle_edge_lengths[:,0])
    triangle_cosines[:,2] = (triangle_edge_lengths[:,0]**2+triangle_edge_lengths[:,1]**2-triangle_edge_lengths[:,2]**2)/(2.0*triangle_edge_lengths[:,0]*triangle_edge_lengths[:,1])
    triangle_cosines[np.where(np.abs(triangle_cosines) < np.power(10.,-6))] = np.power(10.,-6)
    triangle_angles = np.arccos(triangle_cosines)

    triangle_sinuses = np.zeros_like(triangle_edge_lengths,np.float32)
    triangle_sinuses[:,0] = np.sqrt(np.array(1.0 - np.power(triangle_edge_lengths[:,1]**2+triangle_edge_lengths[:,2]**2-triangle_edge_lengths[:,0]**2,2.0)/np.power(2.0*triangle_edge_lengths[:,1]*triangle_edge_lengths[:,2],2.0),np.float16))
    triangle_sinuses[:,1] = np.sqrt(np.array(1.0 - np.power(triangle_edge_lengths[:,2]**2+triangle_edge_lengths[:,0]**2-triangle_edge_lengths[:,1]**2,2.0)/np.power(2.0*triangle_edge_lengths[:,2]*triangle_edge_lengths[:,0],2.0),np.float16))
    triangle_sinuses[:,2] = np.sqrt(np.array(1.0 - np.power(triangle_edge_lengths[:,0]**2+triangle_edge_lengths[:,1]**2-triangle_edge_lengths[:,2]**2,2.0)/np.power(2.0*triangle_edge_lengths[:,0]*triangle_edge_lengths[:,1],2.0),np.float16))
    triangle_sinuses[np.where(triangle_sinuses < np.power(10.,-6))] = np.power(10.,-6)

    triangle_cotangent_weights = (triangle_cosines/triangle_sinuses)
    #triangle_cotangent_weights = 1./triangle_edge_lengths
    #triangle_cotangent_weights = 0.5*(triangle_cosines/triangle_sinuses)/(triangle_edge_lengths + np.power(10.,-10))
    #triangle_cotangent_vectors = (triangle_cosines/triangle_sinuses)[...,np.newaxis] * triangle_edge_vectors/triangle_edge_lengths[:,np.newaxis]
    triangle_cotangent_vectors = triangle_cotangent_weights[...,np.newaxis] * triangle_edge_vectors
    #triangle_cotangent_vectors = triangle_cotangent_weights[...,np.newaxis] * triangle_edge_directions
    # triangle_cotangent_vectors = 1./np.tan(triangle_angles)[...,np.newaxis] * triangle_edge_vectors

    vertex_cotangent_force = np.transpose([nd.sum(triangle_cotangent_vectors[:,1,d]+triangle_cotangent_vectors[:,2,d],triangle_vertices[:,0],index=np.array(list(topomesh.wisps(0)))) for d in xrange(3)])
    vertex_cotangent_sum = nd.sum(triangle_cotangent_weights[:,1] + triangle_cotangent_weights[:,2],triangle_vertices[:,0],index=np.array(list(topomesh.wisps(0))))

    vertex_area = nd.sum(triangle_areas,triangle_vertices[:,0],index=np.array(list(topomesh.wisps(0))))

    #laplacian_force = vertex_cotangent_force
    laplacian_force = vertex_cotangent_force/vertex_cotangent_sum[...,np.newaxis]
    #laplacian_force = vertex_cotangent_force/(4.*vertex_area[...,np.newaxis])
    laplacian_force[np.where(np.isnan(laplacian_force))] = 0.0
    laplacian_force[np.where(np.isinf(laplacian_force))] = 0.0

    return laplacian_force
Project: cellcomplex    Author: VirtualPlants
def property_topomesh_taubin_smoothing_force(topomesh,gaussian_sigma=10.0,positive_factor=0.33,negative_factor=-0.34,cellwise_smoothing=True):

    if not topomesh.has_wisp_property('vertices',degree=1,is_computed=True):
        compute_topomesh_property(topomesh,'vertices',degree=1)

    if cellwise_smoothing:
        edge_vertices = topomesh.wisp_property('vertices',degree=1).values(np.concatenate([np.array(list(topomesh.borders(3,c,2)),int) for c in topomesh.wisps(3)]))
    else:
        edge_vertices = topomesh.wisp_property('vertices',degree=1).values()

    reversed_edge_vertices = np.transpose([edge_vertices[:,1],edge_vertices[:,0]])
    edge_vertices = np.append(edge_vertices,reversed_edge_vertices,axis=0)

    edge_vectors = topomesh.wisp_property('barycenter',degree=0).values(edge_vertices[:,1]) - topomesh.wisp_property('barycenter',degree=0).values(edge_vertices[:,0])
    edge_lengths = np.linalg.norm(edge_vectors,axis=1)
    gaussian_edge_lengths = np.exp(-np.power(edge_lengths,2.0)/np.power(gaussian_sigma,2.0))
    gaussian_edge_vectors = gaussian_edge_lengths[:,np.newaxis]*edge_vectors

    taubin_force = np.transpose([nd.sum(gaussian_edge_vectors[:,0],edge_vertices[:,0],index=list(topomesh.wisps(0))),
                                 nd.sum(gaussian_edge_vectors[:,1],edge_vertices[:,0],index=list(topomesh.wisps(0))),
                                 nd.sum(gaussian_edge_vectors[:,2],edge_vertices[:,0],index=list(topomesh.wisps(0)))])
    #vertices_weights = 1.+nd.sum(gaussian_edge_lengths,edge_vertices[:,0],index=list(topomesh.wisps(0)))
    vertices_weights = nd.sum(gaussian_edge_lengths,edge_vertices[:,0],index=list(topomesh.wisps(0)))
    taubin_positive_force = positive_factor*taubin_force/vertices_weights[:,np.newaxis]

    taubin_positions = array_dict(topomesh.wisp_property('barycenter',degree=0).values(list(topomesh.wisps(0)))+taubin_positive_force, list(topomesh.wisps(0)))

    edge_vectors = taubin_positions.values(edge_vertices[:,1]) - taubin_positions.values(edge_vertices[:,0])
    edge_lengths = np.linalg.norm(edge_vectors,axis=1)
    gaussian_edge_lengths = np.exp(-np.power(edge_lengths,2.0)/np.power(gaussian_sigma,2.0))
    gaussian_edge_vectors = gaussian_edge_lengths[:,np.newaxis]*edge_vectors

    taubin_force = np.transpose([nd.sum(gaussian_edge_vectors[:,0],edge_vertices[:,0],index=list(topomesh.wisps(0))),
                                 nd.sum(gaussian_edge_vectors[:,1],edge_vertices[:,0],index=list(topomesh.wisps(0))),
                                 nd.sum(gaussian_edge_vectors[:,2],edge_vertices[:,0],index=list(topomesh.wisps(0)))])
    #vertices_weights = 1.+nd.sum(gaussian_edge_lengths,edge_vertices[:,0],index=list(topomesh.wisps(0)))
    vertices_weights = nd.sum(gaussian_edge_lengths,edge_vertices[:,0],index=list(topomesh.wisps(0)))
    taubin_negative_force = negative_factor*taubin_force/vertices_weights[:,np.newaxis]

    taubin_force = taubin_positive_force + taubin_negative_force

    return taubin_force
Project: cellcomplex    Author: VirtualPlants
def property_topomesh_laplacian_epidermis_convexity_force(topomesh):
    """todo"""

    if not topomesh.has_wisp_property('vertices',degree=1,is_computed=True):
        compute_topomesh_property(topomesh,'vertices',degree=1)

    if not topomesh.has_wisp_property('barycenter',degree=3,is_computed=True):
        compute_topomesh_property(topomesh,'barycenter',degree=3)

    if not topomesh.has_wisp_property('epidermis',degree=1,is_computed=True):
        compute_topomesh_property(topomesh,'epidermis',degree=1)

    if not topomesh.has_wisp_property('cells',degree=0,is_computed=True):
        compute_topomesh_property(topomesh,'cells',degree=0)
    if not topomesh.has_wisp_property('cells',degree=1,is_computed=True):
        compute_topomesh_property(topomesh,'cells',degree=1)


    epidermis_convexity_force = array_dict(np.zeros_like(topomesh.wisp_property('barycenter',degree=0).values(),np.float32),np.array(list(topomesh.wisps(0))))

    edge_cells_degree = np.array(map(len,topomesh.wisp_property('cells',degree=1).values()))
    # edge_vertices = topomesh.wisp_property('vertices',degree=1).values()[np.where((topomesh.wisp_property('epidermis',degree=1).values()))]
    # edge_vertices = topomesh.wisp_property('vertices',degree=1).values()[np.where((topomesh.wisp_property('epidermis',degree=1).values())&(edge_cells_degree>1))]
    edge_vertices = topomesh.wisp_property('vertices',degree=1).values()[np.where((edge_cells_degree>2)|((topomesh.wisp_property('epidermis',degree=1).values())&(edge_cells_degree>1)))]

    epidermis_vertices = np.unique(edge_vertices)
    vertices_degrees = np.array(nd.sum(np.ones_like(edge_vertices),edge_vertices,index=epidermis_vertices),int)

    reversed_edge_vertices = np.transpose([edge_vertices[:,1],edge_vertices[:,0]])
    edge_vertices = np.append(edge_vertices,reversed_edge_vertices,axis=0)

    edge_vectors = topomesh.wisp_property('barycenter',degree=0).values(edge_vertices[:,1]) - topomesh.wisp_property('barycenter',degree=0).values(edge_vertices[:,0])

    laplacian_force = np.transpose([nd.sum(edge_vectors[:,0],edge_vertices[:,0],index=epidermis_vertices),
                                    nd.sum(edge_vectors[:,1],edge_vertices[:,0],index=epidermis_vertices),
                                    nd.sum(edge_vectors[:,2],edge_vertices[:,0],index=epidermis_vertices)])

    laplacian_force = laplacian_force/vertices_degrees[:,np.newaxis]

    # vertex_cell_barycenter = np.array([np.mean(topomesh.wisp_property('barycenter',degree=3).values(c),axis=0)  for c in topomesh.wisp_property('cells',0).values(epidermis_vertices)])
    # vertices_directions = topomesh.wisp_property('barycenter',degree=0).values(epidermis_vertices) - vertex_cell_barycenter
    # vertices_directions = vertices_directions/np.linalg.norm(vertices_directions,axis=1)[:,np.newaxis]
    # vertices_products = np.einsum('ij,ij->i',laplacian_force,vertices_directions)
    # convex_points = np.where(vertices_products<0.)[0]
    # laplacian_force[convex_points] -= vertices_directions[convex_points]*vertices_products[convex_points,np.newaxis]
    # laplacian_force[convex_points] = np.array([0.,0.,0.])

    epidermis_convexity_force.update(laplacian_force,keys=epidermis_vertices,erase_missing_keys=False)

    return epidermis_convexity_force.values()
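The force assembled here is a uniform (umbrella) Laplacian restricted to selected vertices: the average of the vectors from each vertex to its neighbors, with nd.sum providing both the vector accumulation and the vertex degrees. A compact sketch on toy data:

import numpy as np
from scipy import ndimage as nd

points = np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.], [1., 1., 1.]])
edges = np.array([[0, 1], [0, 2], [1, 3], [2, 3]])
edges = np.append(edges, edges[:, ::-1], axis=0)  # both orientations

vertices = np.unique(edges)
degrees = nd.sum(np.ones(len(edges)), edges[:, 0], index=vertices)

edge_vectors = points[edges[:, 1]] - points[edges[:, 0]]
laplacian = np.transpose([nd.sum(edge_vectors[:, d], edges[:, 0], index=vertices)
                          for d in range(3)]) / degrees[:, np.newaxis]
print(laplacian)  # mean vector from each vertex to its neighbors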
Project: cellcomplex    Author: VirtualPlants    | Project source | File source
def property_topomesh_epidermis_planarization_force(topomesh):
    if not topomesh.has_wisp_property('epidermis',degree=2,is_computed=True):
        compute_topomesh_property(topomesh,'epidermis',degree=2)
    if not topomesh.has_wisp_property('epidermis',degree=3,is_computed=True):
        compute_topomesh_property(topomesh,'epidermis',degree=3)
    if not topomesh.has_wisp_property('epidermis',degree=0,is_computed=True):
        compute_topomesh_property(topomesh,'epidermis',degree=0)

    if not topomesh.has_wisp_property('cells',degree=0,is_computed=True):
        compute_topomesh_property(topomesh,'cells',degree=0)

    if not topomesh.has_wisp_property('barycenter',degree=2,is_computed=True):
        compute_topomesh_property(topomesh,'barycenter',2)
    if not topomesh.has_wisp_property('barycenter',degree=3,is_computed=True):
        compute_topomesh_property(topomesh,'barycenter',3)

    if not topomesh.has_wisp_property('length',degree=1,is_computed=True):
        compute_topomesh_property(topomesh,'length',degree=1)
    if not topomesh.has_wisp_property('area',degree=2,is_computed=True):
        compute_topomesh_property(topomesh,'area',degree=2)
    if not topomesh.has_wisp_property('normal',degree=2,is_computed=True):
        compute_topomesh_property(topomesh,'normal',degree=2)

    epidermis_triangles = np.array(list(topomesh.wisps(2)))[topomesh.wisp_property('epidermis',2).values()]
    epidermis_cells = np.array(list(topomesh.wisps(3)))[topomesh.wisp_property('epidermis',3).values()]
    epidermis_vertices = np.array(list(topomesh.wisps(0)))[topomesh.wisp_property('epidermis',0).values()]

    triangle_cells = np.concatenate(topomesh.wisp_property('cells',2).values(epidermis_triangles))
    triangle_areas = topomesh.wisp_property('area',2).values(epidermis_triangles)
    triangle_centers = topomesh.wisp_property('barycenter',2).values(epidermis_triangles)
    triangle_weighted_normals = triangle_areas[:,np.newaxis]*topomesh.wisp_property('normal',2).values(epidermis_triangles)

    cell_areas = nd.sum(triangle_areas,triangle_cells,index=epidermis_cells)
    cell_epidermis_centers = np.transpose([nd.sum(triangle_areas*triangle_centers[:,k],triangle_cells,index=epidermis_cells) for k in xrange(3)])
    cell_epidermis_centers = cell_epidermis_centers/cell_areas[:,np.newaxis]
    cell_epidermis_centers = array_dict(cell_epidermis_centers,epidermis_cells)

    cell_weighted_normals = np.transpose([nd.sum(triangle_weighted_normals[:,k],triangle_cells,index=epidermis_cells) for k in xrange(3)])
    cell_normals = cell_weighted_normals/cell_areas[:,np.newaxis]
    cell_normals = array_dict(cell_normals/np.linalg.norm(cell_normals,axis=1)[:,np.newaxis],epidermis_cells)

    vertex_cells = np.concatenate([np.intersect1d(np.array(topomesh.wisp_property('cells',0)[v],int),epidermis_cells) for v in epidermis_vertices])
    vertex_cell_vertex = np.concatenate([v*np.ones_like(np.intersect1d(topomesh.wisp_property('cells',0)[v],epidermis_cells),int) for v in epidermis_vertices])

    vertex_cell_normals = cell_normals.values(vertex_cells)
    #vertex_cell_normals = np.array([cell_normals[c] if cell_normals.has_key(c) else np.array([0,0,1]) for c in vertex_cells])
    vertex_cell_centers = cell_epidermis_centers.values(vertex_cells)
    #vertex_cell_centers = np.array([cell_epidermis_centers[c] if cell_epidermis_centers.has_key(c) else topomesh.wisp_property('barycenter',3)[c]])
    vertex_cell_vectors = topomesh.wisp_property('barycenter',0).values(vertex_cell_vertex) - vertex_cell_centers
    vertex_cell_projectors = -np.einsum('ij,ij->i',vertex_cell_vectors,vertex_cell_normals)[:,np.newaxis]*vertex_cell_normals

    vertex_projectors = np.transpose([nd.sum(vertex_cell_projectors[:,k],vertex_cell_vertex,index=list(topomesh.wisps(0))) for k in xrange(3)])
    vertex_projectors[np.isnan(vertex_projectors)] = 0.
    return vertex_projectors
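The planarization force moves each epidermis vertex onto its cell's area-weighted surface plane; the essential step is the signed-distance projection along the cell normal computed with np.einsum. A minimal sketch of that projection for one placeholder plane (center c and unit normal n are illustrative; in the code above both are area-weighted averages over each cell's epidermis triangles):

import numpy as np

points = np.array([[0., 0., 1.], [1., 0., 2.], [0., 1., -1.], [1., 1., 0.5]])
c = points.mean(axis=0)
n = np.array([0., 0., 1.])

vectors = points - c
# Signed distance of each point along n, negated back along n: the
# displacement that puts every point onto the plane.
projectors = -np.einsum('ij,j->i', vectors, n)[:, np.newaxis] * n
print(points + projectors)  # every z now equals c[2]: points are coplanar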
Project: cellcomplex    Author: VirtualPlants    | Project source | File source
def topomesh_split_edge(topomesh,eid):
    pid_to_keep, pid_to_unlink = topomesh.borders(1,eid)

    edge_fids = list(topomesh.regions(1,eid))
    eids_to_unlink = np.array([list(set(list(topomesh.borders(2,fid))).intersection(set(list(topomesh.regions(0,pid_to_unlink)))).difference({eid}))[0] for fid in edge_fids])
    pids_split = np.array([list(set(list(topomesh.borders(2,fid,2))).difference({pid_to_keep, pid_to_unlink}))[0] for fid in edge_fids])

    pid_to_add = topomesh.add_wisp(0)
    topomesh.unlink(1,eid,pid_to_unlink)
    topomesh.link(1,eid,pid_to_add)
    # print eid," : ",pid_to_keep,pid_to_add

    topomesh.wisp_property("barycenter",0)[pid_to_add] = (topomesh.wisp_property("barycenter",0).values([pid_to_keep,pid_to_unlink]).sum(axis=0))/2.

    eid_to_add = topomesh.add_wisp(1)
    topomesh.link(1,eid_to_add,pid_to_add)
    topomesh.link(1,eid_to_add,pid_to_unlink)
    # print "Split ",eid_to_add," : ",pid_to_add,pid_to_unlink

    for fid, eid_to_unlink, pid_split in zip(edge_fids,eids_to_unlink,pids_split):

        eid_split = topomesh.add_wisp(1)
        topomesh.link(1,eid_split,pid_to_add)
        topomesh.link(1,eid_split,pid_split)
        # print "Added ",eid_split," : ",pid_to_add,pid_split

        topomesh.unlink(2,fid,eid_to_unlink)
        topomesh.link(2,fid,eid_split)

        fid_to_add = topomesh.add_wisp(2)
        topomesh.link(2,fid_to_add,eid_to_add)
        topomesh.link(2,fid_to_add,eid_to_unlink)
        topomesh.link(2,fid_to_add,eid_split)

        for cid in topomesh.regions(2,fid):
            topomesh.link(3,cid,fid_to_add)

    # edge_borders = np.array(map(len,map(np.unique,[list(topomesh.borders(1,e)) for e in topomesh.wisps(1)])))
    # if np.min(edge_borders) == 1:
    # edge_vertices = np.sort(np.array([list(topomesh.borders(1,e)) for e in topomesh.wisps(1) if  topomesh.nb_borders(1,e) == 2]))
    # edge_vertex_id = edge_vertices[:,0]*10000 + edge_vertices[:,1]
    # if edge_vertices.shape[0] != array_unique(edge_vertices).shape[0]:
    #     print eid," split error : (",pid_to_keep,pid_to_unlink,")",np.array(list(topomesh.wisps(1)))[nd.sum(np.ones_like(edge_vertex_id),edge_vertex_id,index=edge_vertex_id)>1]
    #     raw_input()

    return True
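A hypothetical driver loop (not part of the source), refining a topomesh by splitting edges longer than a threshold; it only uses helpers shown elsewhere in this file, and the threshold choice is illustrative:

# Hypothetical usage sketch: split every edge longer than twice the
# mean edge length. Lengths are read from the pre-split snapshot, so
# edges created by earlier splits in the loop are not revisited.
compute_topomesh_property(topomesh, 'length', degree=1)
edge_lengths = topomesh.wisp_property('length', 1)
target_length = 2. * edge_lengths.values().mean()
for eid in list(topomesh.wisps(1)):
    if edge_lengths[eid] > target_length:
        topomesh_split_edge(topomesh, eid)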
Project: cellcomplex    Author: VirtualPlants    | Project source | File source
def compute_topomesh_cell_property_from_faces(topomesh, property_name, aggregate='mean', weighting='area'):
    """Compute a property on degree 3 using the same property defined at degree 2.

    The cell property is computed by averaging or summing the properties of its
    border faces, weighting them differently according to the chosen method.

    Args:
        topomesh (:class:`openalea.cellcomplex.property_topomesh.PropertyTopomesh`): 
            The structure on which to compute the property.
        property_name (str): 
            The name of the property to compute (must be already computed on faces).
        aggregate (str): 
            The way weighted face properties are combined to compute the cell property (default is *mean*):
                * *mean*: the cell property is the weighted average of face properties
                * *sum*: the cell property is the weighted sum of face properties
        weighting (str): 
            The way weights are assigned to each face to compute the property (default is *area*):
                * *uniform*: all the faces have the same weight (1)
                * *area*: the weight on the faces is equal to their area

    Returns:
        None

    Note: 
        The PropertyTopomesh passed as argument is updated.

    """

    start_time = time()
    print "--> Computing cell property from faces"

    assert topomesh.has_wisp_property(property_name,2,is_computed=True)
    assert weighting in ['uniform','area']

    face_property = topomesh.wisp_property(property_name,2)

    cell_faces = np.concatenate([list(topomesh.borders(3,c)) for c in topomesh.wisps(3)]).astype(np.uint32)
    cell_face_cells = np.concatenate([c*np.ones(topomesh.nb_borders(3,c)) for c in topomesh.wisps(3)]).astype(np.uint32)

    if weighting == 'uniform':
        cell_face_weight = np.ones_like(cell_faces)
    elif weighting == 'area':
        compute_topomesh_property(topomesh,'area',2)
        cell_face_weight = topomesh.wisp_property('area',2).values(cell_faces)

    cell_face_property = face_property.values(cell_faces)

    if cell_face_property.ndim == 1:
        cell_property = nd.sum(cell_face_weight*cell_face_property,cell_face_cells,index=list(topomesh.wisps(3)))/nd.sum(cell_face_weight,cell_face_cells,index=list(topomesh.wisps(3)))
    elif cell_face_property.ndim == 2:
        cell_property = np.transpose([nd.sum(cell_face_weight*cell_face_property[:,k],cell_face_cells,index=list(topomesh.wisps(3)))/nd.sum(cell_face_weight,cell_face_cells,index=list(topomesh.wisps(3))) for k in xrange(cell_face_property.shape[1])])
    elif cell_face_property.ndim == 3:
        cell_property = np.transpose([[nd.sum(cell_face_weight*cell_face_property[:,j,k],cell_face_cells,index=list(topomesh.wisps(3)))/nd.sum(cell_face_weight,cell_face_cells,index=list(topomesh.wisps(3))) for k in xrange(cell_face_property.shape[2])] for j in xrange(cell_face_property.shape[1])])

    topomesh.update_wisp_property(property_name,degree=3,values=array_dict(cell_property,keys=list(topomesh.wisps(3))))

    end_time = time()
    print "<-- Computing cell property from faces [",end_time-start_time,"s]"
Project: cellcomplex    Author: VirtualPlants    | Project source | File source
def density_contour_plot(figure,X,Y,color,XY_range=None,xlabel="",ylabel="",n_points=100,n_contours=10,smooth_factor=1.0,linewidth=1,marker_size=40.,alpha=1.0,label=""):
    font = fm.FontProperties(family = 'Trebuchet', weight ='light')
    figure.patch.set_facecolor('white')
    axes = figure.add_subplot(111)

    if XY_range is None:
        XY_range = [[X.min(),X.max()],[Y.min(),Y.max()]]

    range_x = np.linspace(XY_range[0][0],XY_range[0][1],n_points)
    range_y = np.linspace(XY_range[1][0],XY_range[1][1],n_points)
    xx, yy = np.meshgrid(range_x,range_y)

    range_x_cr = (range_x - range_x.mean())/range_x.std()
    range_y_cr = (range_y - range_y.mean())/range_y.std()
    xx_cr, yy_cr = np.meshgrid(range_x_cr,range_y_cr)


    def density_function(positions,radius=1,k=0.1):
        def density_func(x,y):
            points = np.array(positions.values())

            if len((x+y).shape) == 1:
                distances = np.power(np.power(x[np.newaxis] - points[:,0,np.newaxis],2) +  np.power(y[np.newaxis] - points[:,1,np.newaxis],2),0.5)
            elif len((x+y).shape) == 2:
                distances = np.power(np.power(x[np.newaxis] - points[:,0,np.newaxis,np.newaxis],2) +  np.power(y[np.newaxis] - points[:,1,np.newaxis,np.newaxis],2),0.5)

            density_potential = 1./2. * (1. - np.tanh(k*(distances - radius)))
            density = density_potential.sum(axis=0)

            return density
        return density_func

    positions = dict(zip(range(len(X)),np.transpose([X,Y])))
    positions_cr = dict(zip(range(len(X)),np.transpose([(X-range_x.mean())/range_x.std(),(Y-range_y.mean())/range_y.std()])))

    # radius = np.sqrt((X.std() * Y.std()))
    radius = 10.0/n_points
    density_k = 100.*np.exp(-smooth_factor/2.)
    data_density = density_function(positions_cr,radius,density_k)(xx_cr,yy_cr)

    levels = np.linspace(0,data_density.max(),n_contours)
    colors = np.array([(lev*color + (n_contours-1-lev)*np.ones(3))/(n_contours-1) for lev in xrange(n_contours)])

    axes.scatter(X,Y,s=marker_size,c=color,linewidth=0,alpha=alpha/2.,label=label)
    axes.contour(xx,yy,data_density,linewidths=linewidth,levels=levels,colors=colors,alpha=(alpha+1)/2.)

    axes.set_xlim(*tuple(XY_range[0]))
    axes.set_xlabel(xlabel,fontproperties=font, size=10, style='italic')
    axes.set_xticklabels(axes.get_xticks(),fontproperties=font, size=12)
    axes.set_ylim(*tuple(XY_range[1]))
    axes.set_ylabel(ylabel, fontproperties=font, size=10, style='italic')
    axes.set_yticklabels(axes.get_yticks(),fontproperties=font, size=12)
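The inner density_func builds a smooth kernel-density estimate: each data point contributes through a tanh step that is close to 1 inside `radius` and falls to 0 far away, so summing over points gives a softened count. A standalone sketch of that kernel (names are illustrative):

import numpy as np

def density(x, y, points, radius=1.0, k=0.1):
    # Distance from each data point to each query location, then the
    # soft indicator 0.5 * (1 - tanh(k * (d - radius))) summed over points.
    d = np.hypot(x - points[:, 0, np.newaxis], y - points[:, 1, np.newaxis])
    return (0.5 * (1. - np.tanh(k * (d - radius)))).sum(axis=0)

pts = np.random.rand(50, 2)
print(density(np.array([0.5]), np.array([0.5]), pts, radius=0.2, k=10.0))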
Project: SCaIP    Author: simonsfoundation    | Project source | File source
def get_contours3d(A, dims, thr=0.9):
    """Gets contour of spatial components and returns their coordinates

     Parameters
     -----------
     A:   np.ndarray or sparse matrix
               Matrix of Spatial components (d x K)
     dims: tuple of ints
               Spatial dimensions of movie (x, y, z)
     thr: scalar between 0 and 1
               Energy threshold for computing contours (default 0.9)

     Returns
     --------
     Coor: list of coordinates with center of mass and
            contour plot coordinates (per layer) for each component
    """
    d, nr = np.shape(A)
    d1, d2, d3 = dims
    x, y = np.mgrid[0:d2:1, 0:d3:1]

    coordinates = []
    cm = np.asarray([center_of_mass(a.reshape(dims, order='F')) for a in A.T])
    for i in range(nr):
        pars = dict()
        indx = np.argsort(A[:, i], axis=None)[::-1]
        cumEn = np.cumsum(A[:, i].flatten()[indx]**2)
        cumEn /= cumEn[-1]
        Bvec = np.zeros(d)
        Bvec[indx] = cumEn
        Bmat = np.reshape(Bvec, dims, order='F')
        pars['coordinates'] = []
        for B in Bmat:
            cs = plt.contour(y, x, B, [thr])
            # this fix is necessary for having disjoint figures and borders plotted correctly
            p = cs.collections[0].get_paths()
            v = np.atleast_2d([np.nan, np.nan])
            for pths in p:
                vtx = pths.vertices
                num_close_coords = np.sum(np.isclose(vtx[0, :], vtx[-1, :]))
                if num_close_coords < 2:
                    if num_close_coords == 0:
                        # case angle
                        newpt = np.round(vtx[-1, :] / [d2, d1]) * [d2, d1]
                        vtx = np.concatenate((vtx, newpt[np.newaxis, :]), axis=0)

                    else:
                        # case one is border
                        vtx = np.concatenate((vtx, vtx[0, np.newaxis]), axis=0)

                v = np.concatenate((v, vtx, np.atleast_2d([np.nan, np.nan])), axis=0)

            pars['coordinates'] += [v]
        pars['CoM'] = np.squeeze(cm[i, :])
        pars['neuron_id'] = i + 1
        coordinates.append(pars)
    return coordinates
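The loop above ranks a component's pixels by weight and thresholds the normalized cumulative squared energy, so the contour at `thr` encloses the most energetic pixels. A sketch of that thresholding step alone:

import numpy as np

# Smallest set of pixels holding a fraction `thr` of the squared energy
a = np.random.rand(100)
thr = 0.9
order = np.argsort(a)[::-1]
cum_energy = np.cumsum(a[order]**2)
cum_energy /= cum_energy[-1]
support = order[cum_energy <= thr]
print("%d of %d pixels hold %.0f%% of the energy" % (len(support), a.size, 100 * thr))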
Project: imagepy    Author: Image-Py    | Project source | File source
def run(self, ips, imgs, para = None):
        inten = WindowsManager.get(para['inten']).ips
        if not para['slice']:
            imgs = [inten.img]
            msks = [ips.img]
        else: 
            msks = ips.imgs
            if len(msks)==1:
                msks *= len(imgs)
        buf = imgs[0].astype(np.uint16)
        strc = ndimage.generate_binary_structure(2, 1 if para['con']=='4-connect' else 2)
        idct = ['Max','Min','Mean','Variance','Standard','Sum']
        key = {'Max':'max','Min':'min','Mean':'mean',
               'Variance':'var','Standard':'std','Sum':'sum'}
        idct = [i for i in idct if para[key[i]]]
        titles = ['Slice', 'ID'][0 if para['slice'] else 1:] 
        if para['center']: titles.extend(['Center-X','Center-Y'])
        if para['extent']: titles.extend(['Min-Y','Min-X','Max-Y','Max-X'])
        titles.extend(idct)
        k = ips.unit[0]
        data, mark = [], []
        for i in range(len(imgs)):
            n = ndimage.label(msks[i], strc, output=buf)
            index = range(1, n+1)
            dt = []
            if para['slice']:dt.append([i]*n)
            dt.append(range(n))

            xy = ndimage.center_of_mass(imgs[i], buf, index)
            xy = np.array(xy).round(2).T
            if para['center']:dt.extend([xy[1]*k, xy[0]*k])

            boxs = [None] * n
            if para['extent']:
                boxs = ndimage.find_objects(buf)
                boxs = [(i[0].start, i[1].start, i[0].stop, i[1].stop) for i in boxs]
                for j in (0,1,2,3):
                    dt.append([i[j]*k for i in boxs])
            if para['max']:dt.append(ndimage.maximum(imgs[i], buf, index).round(2))
            if para['min']:dt.append(ndimage.minimum(imgs[i], buf, index).round(2))        
            if para['mean']:dt.append(ndimage.mean(imgs[i], buf, index).round(2))
            if para['var']:dt.append(ndimage.variance(imgs[i], buf, index).round(2)) 
            if para['std']:dt.append(ndimage.standard_deviation(imgs[i], buf, index).round(2))
            if para['sum']:dt.append(ndimage.sum(imgs[i], buf, index).round(2))      

            mark.append([(center, cov) for center,cov in zip(xy.T, boxs)]) 
            data.extend(list(zip(*dt)))

        IPy.table(inten.title+'-region statistic', data, titles)
        inten.mark = Mark(mark)
        inten.update = True
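This plugin follows the standard ndimage region-statistics pattern: ndimage.label assigns an integer label to each connected region of the mask, and the per-label reductions (sum, mean, variance, center_of_mass, ...) are then evaluated over all labels in one call each. A minimal sketch of the pattern with synthetic data:

import numpy as np
from scipy import ndimage

# Label connected regions of a mask, then reduce each region
img = np.random.rand(64, 64)
msk = img > 0.7
strc = ndimage.generate_binary_structure(2, 1)   # 4-connectivity
buf, n = ndimage.label(msk, strc)
index = np.arange(1, n + 1)

sums = ndimage.sum(img, buf, index)
means = ndimage.mean(img, buf, index)
centers = ndimage.center_of_mass(img, buf, index)
print(n, sums[:3], means[:3], centers[:3])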
Project: bates_galaxies_lab    Author: aleksds    | Project source | File source
def _sn_func(index, signal=None, noise=None):
    """
    Default function to calculate the S/N of a bin with spaxels "index".

    The Voronoi binning algorithm does not require this function to have a
    specific form and this default one can be changed by the user if needed
    by passing a different function as

        ... = voronoi_2d_binning(..., sn_func=sn_func)

    The S/N returned by sn_func() does not need to be an analytic
    function of S and N.

    There is also no need for sn_func() to return the actual S/N.
    Instead sn_func() could return any quantity the user needs to equalize.

    For example sn_func() could be a procedure which uses ppxf to measure
    the velocity dispersion from the coadded spectrum of spaxels "index"
    and returns the relative error in the dispersion.

    Of course an analytic approximation of S/N, like the one below,
    speeds up the calculation.

    :param index: integer vector of length N containing the indices of
        the spaxels for which the combined S/N has to be returned.
        The indices refer to elements of the vectors signal and noise.
    :param signal: vector of length M>N with the signal of all spaxels.
    :param noise: vector of length M>N with the noise of all spaxels.
    :return: scalar S/N or another quantity that needs to be equalized.
    """

    sn = np.sum(signal[index])/np.sqrt(np.sum(noise[index]**2))

    # The following commented line illustrates, as an example, how one
    # would include the effect of spatial covariance using the empirical
    # Eq.(1) from http://adsabs.harvard.edu/abs/2015A%26A...576A.135G
    # Note however that the formula is not accurate for large bins.
    #
    # sn /= 1 + 1.07*np.log10(index.size)

    return sn

#----------------------------------------------------------------------
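As a quick check of the default estimator: the combined S/N of a subset of spaxels is the summed signal over the quadrature-summed noise. A scalar sketch:

import numpy as np

signal = np.array([3., 4., 5.])
noise = np.array([1., 1., 1.])
index = np.array([0, 2])   # spaxels in the bin

sn = np.sum(signal[index]) / np.sqrt(np.sum(noise[index]**2))
print(sn)  # 8/sqrt(2) ~ 5.66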
Project: bates_galaxies_lab    Author: aleksds    | Project source | File source
def _cvt_equal_mass(x, y, signal, noise, xnode, ynode, pixelsize, quiet, sn_func, wvt):
    """
    Implements the modified Lloyd algorithm
    in section 4.1 of Cappellari & Copin (2003).

    NB: When the keyword WVT is set this routine includes
    the modification proposed by Diehl & Statler (2006).

    """
    dens2 = (signal/noise)**4     # See beginning of section 4.1 of CC03
    scale = np.ones_like(xnode)   # Start with the same scale length for all bins

    for it in range(1, xnode.size):  # Do at most xnode.size iterations

        xnode_old, ynode_old = xnode.copy(), ynode.copy()
        classe = voronoi_tessellation(x, y, xnode, ynode, scale)

        # Computes centroids of the bins, weighted by dens**2.
        # Exponent 2 on the density produces equal-mass Voronoi bins.
        # The geometric centroids are computed if WVT keyword is set.
        #
        good = np.unique(classe)
        if wvt:
            for k in good:
                index = np.flatnonzero(classe == k)   # Find subscripts of pixels in bin k.
                xnode[k], ynode[k] = np.mean(x[index]), np.mean(y[index])
                sn = sn_func(index, signal, noise)
                scale[k] = np.sqrt(index.size/sn)  # Eq. (4) of Diehl & Statler (2006)
        else:
            mass = ndimage.sum(dens2, labels=classe, index=good)
            xnode = ndimage.sum(x*dens2, labels=classe, index=good)/mass
            ynode = ndimage.sum(y*dens2, labels=classe, index=good)/mass

        diff2 = np.sum((xnode - xnode_old)**2 + (ynode - ynode_old)**2)
        diff = np.sqrt(diff2)/pixelsize

        if not quiet:
            print('Iter: %4i  Diff: %.4g' % (it, diff))

        if diff < 0.1:
            break

    # If coordinates have changed, re-compute (Weighted) Voronoi Tessellation of the pixels grid
    #
    if diff > 0:
        classe = voronoi_tessellation(x, y, xnode, ynode, scale)
        good = np.unique(classe)  # Check for zero-size Voronoi bins

    # Only return the generators and scales of the nonzero Voronoi bins

    return xnode[good], ynode[good], scale[good], it

#-----------------------------------------------------------------------
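In the non-WVT branch, ndimage.sum computes the mass and the mass-weighted coordinate sums of every bin in a single pass, so the centroid update needs no Python loop over bins. A standalone sketch with synthetic bin assignments:

import numpy as np
from scipy import ndimage

x, y = np.random.rand(100), np.random.rand(100)
dens2 = np.random.rand(100)              # stands in for (signal/noise)**4
classe = np.random.randint(0, 5, 100)    # bin assignment of each pixel
good = np.unique(classe)

# Mass-weighted centroid of each bin, computed label-wise
mass = ndimage.sum(dens2, labels=classe, index=good)
xnode = ndimage.sum(x * dens2, labels=classe, index=good) / mass
ynode = ndimage.sum(y * dens2, labels=classe, index=good) / mass
print(list(zip(xnode, ynode)))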
Project: procrustes    Author: bmershon    | Project source | File source
def makeMesh(self):
        if self.Xs.shape[2] == 0:
            return
        X = self.Xs[:, :, 0]
        Y = self.Ys[:, :, 0]
        Z = self.Zs[:, :, 0]
        mesh = PolyMesh()
        #Come up with vertex indices in the mask
        Mask = np.array(self.Mask, dtype=np.int32)
        nV = np.sum(Mask)
        Mask[self.Mask > 0] = np.arange(nV) + 1
        Mask = Mask - 1
        VPos = np.zeros((nV, 3))
        VPos[:, 0] = X[self.Mask > 0]
        VPos[:, 1] = Y[self.Mask > 0]
        VPos[:, 2] = -Z[self.Mask > 0]
        #Add lower right triangle
        v1 = Mask[0:-1, 0:-1].flatten()
        v2 = Mask[1:, 0:-1].flatten()
        v3 = Mask[1:, 1:].flatten()
        N = v1.size
        ITris1 = np.concatenate((np.reshape(v1, [N, 1]), np.reshape(v2, [N, 1]), np.reshape(v3, [N, 1])), 1)
        #Add upper right triangle
        v1 = Mask[0:-1, 0:-1].flatten()
        v2 = Mask[1:, 1:].flatten()
        v3 = Mask[0:-1, 1:].flatten()
        N = v1.size
        ITris2 = np.concatenate((np.reshape(v1, [N, 1]), np.reshape(v2, [N, 1]), np.reshape(v3, [N, 1])), 1)
        ITris = np.concatenate((ITris1, ITris2), 0)
        #Only retain triangles which have all three points
        ITris = ITris[np.sum(ITris == -1, 1) == 0, :]
        mesh.VPos = VPos
        mesh.ITris = ITris
        mesh.VColors = 0.5*np.ones(mesh.VPos.shape)
        mesh.updateNormalBuffer()
        mesh.VPosVBO = vbo.VBO(np.array(mesh.VPos, dtype=np.float32))
        mesh.VNormalsVBO = vbo.VBO(np.array(mesh.VNormals, dtype=np.float32))
        mesh.VColorsVBO = vbo.VBO(np.array(mesh.VColors, dtype=np.float32))
        mesh.IndexVBO = vbo.VBO(mesh.ITris, target=GL_ELEMENT_ARRAY_BUFFER)
        mesh.needsDisplayUpdate = False
        self.mesh = mesh

    #Compute the delta coordinates and solvers for all frames of the video
Project: ugali    Author: DarkEnergySurvey    | Project source | File source
def observableFractionCDF(self, mask, distance_modulus, mass_min=0.1):
        """
        Compute observable fraction of stars with masses greater than mass_min in each 
        pixel in the interior region of the mask. Incorporates simplistic
        photometric errors.

        ADW: Careful, this function is fragile! The selection here should
             be the same as mask.restrictCatalogToObservableSpace. However,
             for technical reasons it is faster to do the calculation with
             broadcasting here.
        ADW: This function is currently a rate-limiting step in the likelihood 
             calculation. Could it be faster?
        """
        method = 'step'

        mass_init,mass_pdf,mass_act,mag_1,mag_2 = self.sample(mass_min=mass_min,full_data_range=False)

        mag_1 = mag_1+distance_modulus
        mag_2 = mag_2+distance_modulus

        mask_1,mask_2 = mask.mask_roi_unique.T

        mag_err_1 = mask.photo_err_1(mask_1[:,np.newaxis]-mag_1)
        mag_err_2 = mask.photo_err_2(mask_2[:,np.newaxis]-mag_2)

        # "upper" bound set by maglim
        delta_hi_1 = (mask_1[:,np.newaxis]-mag_1)/mag_err_1
        delta_hi_2 = (mask_2[:,np.newaxis]-mag_2)/mag_err_2

        # "lower" bound set by bins_mag (maglim shouldn't be 0)
        delta_lo_1 = (mask.roi.bins_mag[0]-mag_1)/mag_err_1
        delta_lo_2 = (mask.roi.bins_mag[0]-mag_2)/mag_err_2

        cdf_1 = norm_cdf(delta_hi_1) - norm_cdf(delta_lo_1)
        cdf_2 = norm_cdf(delta_hi_2) - norm_cdf(delta_lo_2)
        cdf = cdf_1*cdf_2

        if method is None or method == 'none':
            comp_cdf = cdf
        elif self.band_1_detection == True:
            comp = mask.mask_1.completeness(mag_1, method=method)
            comp_cdf = comp*cdf
        elif self.band_1_detection == False:
            comp = mask.mask_2.completeness(mag_2, method=method)
            comp_cdf = comp*cdf
        else:
            comp_1 = mask.mask_1.completeness(mag_1, method=method)
            comp_2 = mask.mask_2.completeness(mag_2, method=method)
            comp_cdf = comp_1*comp_2*cdf

        observable_fraction = (mass_pdf[np.newaxis]*comp_cdf).sum(axis=-1)
        return observable_fraction[mask.mask_roi_digi[mask.roi.pixel_interior_cut]]
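The CDF factors above give, per star, the probability that its observed magnitude scatters inside the window between the faint bin edge and the magnitude limit. A scalar sketch, where scipy.stats.norm stands in for the module's norm_cdf helper:

import numpy as np
from scipy.stats import norm

# Probability that a star of true magnitude `mag`, with photometric
# error `err`, is observed between the bin edge `lo` and maglim `hi`
mag, err = 22.5, 0.2
lo, hi = 16.0, 23.0
p = norm.cdf((hi - mag) / err) - norm.cdf((lo - mag) / err)
print(p)  # ~0.994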