The following code examples, extracted from open-source Python projects, illustrate how to use scipy.ndimage.map_coordinates().
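Before the project examples, here is a minimal, self-contained usage sketch (not taken from any of the projects below): map_coordinates samples an input array at arbitrary, possibly fractional, coordinates given as one sequence per axis in (row, column, ...) order, using spline interpolation of the requested order.

import numpy as np
from scipy import ndimage

a = np.arange(12.).reshape(4, 3)
# Sample a at (row, col) = (0.5, 0.5) and (2, 1) with linear interpolation (order=1).
vals = ndimage.map_coordinates(a, [[0.5, 2], [0.5, 1]], order=1)
print(vals)  # -> [2. 7.]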
def _shift_interp(array, shift_value, mode='constant', cval=0):
    # Manual alternative to built-in function: slightly slower
    Ndim  = array.ndim
    dims  = array.shape
    dtype = array.dtype.kind

    if (Ndim == 1):
        pass
    elif (Ndim == 2):
        x, y = np.meshgrid(np.arange(dims[1], dtype=dtype),
                           np.arange(dims[0], dtype=dtype))

        x -= shift_value[0]
        y -= shift_value[1]

        shifted = ndimage.map_coordinates(array, [y, x], mode=mode, cval=cval)

    return shifted
def _rotate_interp(array, alpha, center, mode='constant', cval=0):
    '''
    Rotation around a provided center

    This is the only way to be sure where exactly the center of rotation is.
    '''
    dtype = array.dtype
    dims  = array.shape
    alpha_rad = -np.deg2rad(alpha)

    x, y = np.meshgrid(np.arange(dims[1], dtype=dtype),
                       np.arange(dims[0], dtype=dtype))

    xp = (x-center[0])*np.cos(alpha_rad) + (y-center[1])*np.sin(alpha_rad) + center[0]
    yp = -(x-center[0])*np.sin(alpha_rad) + (y-center[1])*np.cos(alpha_rad) + center[1]

    rotated = ndimage.map_coordinates(array, [yp, xp], mode=mode, cval=cval, order=3)

    return rotated
def _scale_interp(array, scale_value, center, mode='constant', cval=0):
    Ndim  = array.ndim
    dims  = array.shape
    dtype = array.dtype.kind

    if (Ndim == 1):
        pass
    elif (Ndim == 2):
        x, y = np.meshgrid(np.arange(dims[1], dtype=dtype),
                           np.arange(dims[0], dtype=dtype))

        nx = (x - center[0]) / scale_value[0] + center[0]
        ny = (y - center[1]) / scale_value[1] + center[1]

        scaled = ndimage.map_coordinates(array, [ny, nx], mode=mode, cval=cval)

    return scaled
def linePlot(img, x0, y0, x1, y1, resolution=None, order=3):
    '''
    Returns [img] intensity values along the line defined by [x0, y0, x1, y1].

    resolution ... number of data points to evaluate
    order      ... interpolation precision
    '''
    if resolution is None:
        resolution = int(((x1 - x0)**2 + (y1 - y0)**2)**0.5)

    if order == 0:
        x = np.linspace(x0, x1, resolution, dtype=int)
        y = np.linspace(y0, y1, resolution, dtype=int)
        return img[y, x]

    x = np.linspace(x0, x1, resolution)
    y = np.linspace(y0, y1, resolution)
    return map_coordinates(img, np.vstack((y, x)), order=order)
def error_center_line(self, image, npoints=all, order=1, overlap=True):
    """Error between worm center line and image

    Arguments:
        image (array): gray scale image of worm
    """
    import shapely.geometry as geo

    if overlap:
        xyl, xyr, cl = self.sides(npoints=npoints)
        w = np.ones(npoints)
        #plt.figure(141); plt.clf();
        worm = geo.Polygon()
        for i in range(npoints - 1):
            poly = geo.Polygon(np.array([xyl[i, :], xyr[i, :], xyr[i+1, :], xyl[i+1, :]]))
            ovl = worm.intersection(poly).area
            tot = poly.area
            w[i+1] = 1 - ovl / tot
            worm = worm.union(poly)
        #print w
        return np.sum(w * nd.map_coordinates(image.T, cl.T, order=order))
    else:
        cl = self.center_line(npoints=npoints)
        return np.sum(nd.map_coordinates(image.T, cl.T, order=order))
def reflectance(rad, cosb, t_setx, height, r_setx, s_setx, sang):
    n = len(t_setx)
    ttmp = [x/dtau for x in t_setx]
    htmp = [height/dheight for x in t_setx]
    stmp = [sang - x for x in s_setx]

    path  = ndimage.map_coordinates(path_rad,  [ttmp, htmp, stmp]).reshape(n, 1)
    back  = ndimage.map_coordinates(back_rad,  [ttmp, htmp, stmp]).reshape(n, 1)
    pixel = ndimage.map_coordinates(pixel_rad, [ttmp, htmp, stmp]).reshape(n, 1)
    dir   = ndimage.map_coordinates(dir_irad,  [ttmp, htmp, stmp]).reshape(n, 1)
    sky   = ndimage.map_coordinates(sky_irad,  [ttmp, htmp, stmp]).reshape(n, 1)
    env   = ndimage.map_coordinates(env_irad,  [ttmp, htmp, stmp]).reshape(n, 1)
    sph   = ndimage.map_coordinates(sph_alb,   [ttmp, htmp, stmp]).reshape(n, 1)
    rayl  = ndimage.map_coordinates(tau_rayl,  [ttmp, htmp, stmp]).reshape(n, 1)
    aero  = ndimage.map_coordinates(tau_aero,  [ttmp, htmp, stmp]).reshape(n, 1)
    minor = ndimage.map_coordinates(tau_minor, [ttmp, htmp, stmp]).reshape(n, 1)

    dir = dir*cosb/cosb0
    #print dir
    back = back*(1 - r_set0*sph)*r_setx/(1 - r_setx*sph)/r_set0
    #print back
    env = env*(1 - r_set0*sph)*r_setx/(1 - r_setx*sph)/r_set0
    odep = rayl + aero + minor
    S = np.cos(np.pi*sang/180)
    return np.pi*(rad - path - back)/(dir + sky + env)*np.exp(odep/S)
def interpolate_slice(f3d, rot, pfac=2, size=None):
    nhalf = f3d.shape[0] // 2
    if size is None:
        phalf = nhalf
    else:
        phalf = size // 2
    qot = rot * pfac  # Scaling!
    px, py, pz = np.meshgrid(np.arange(-phalf, phalf), np.arange(-phalf, phalf), 0)
    pr = np.sqrt(px ** 2 + py ** 2 + pz ** 2)
    pcoords = np.vstack([px.reshape(-1), py.reshape(-1), pz.reshape(-1)])
    mcoords = qot.T.dot(pcoords)
    mcoords = mcoords[:, pr.reshape(-1) < nhalf]
    pvals = map_coordinates(np.real(f3d), mcoords, order=1, mode="wrap") + \
        1j * map_coordinates(np.imag(f3d), mcoords, order=1, mode="wrap")
    pslice = np.zeros(pr.shape, dtype=complex)
    pslice[pr < nhalf] = pvals
    return pslice
def calculate_counts_full(channel, loop, emission_model, flattened_emissivities):
    """
    Calculate the AIA intensity using the wavelength response functions and a
    full emission model.
    """
    density = loop.density
    electron_temperature = loop.electron_temperature
    counts = np.zeros(electron_temperature.shape)
    itemperature, idensity = emission_model.interpolate_to_mesh_indices(loop)
    for ion, flat_emiss in zip(emission_model, flattened_emissivities):
        if flat_emiss is None:
            continue
        ionization_fraction = emission_model.get_ionization_fraction(loop, ion)
        tmp = np.reshape(map_coordinates(flat_emiss.value,
                                         np.vstack([itemperature, idensity])),
                         electron_temperature.shape)
        tmp = u.Quantity(np.where(tmp < 0., 0., tmp), flat_emiss.unit)
        counts_tmp = ion.abundance*0.83/(4*np.pi*u.steradian)*ionization_fraction*density*tmp
        if not hasattr(counts, 'unit'):
            counts = counts*counts_tmp.unit
        counts += counts_tmp

    return counts
def apply_elastic(img, indices):
    return nd.map_coordinates(img, indices, order=1).reshape(img.shape)
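The one-liner above expects precomputed sampling coordinates. Below is a minimal sketch, not part of the original project, of how such an indices array could be built for a Simard-style elastic distortion; the helper name elastic_indices and the alpha/sigma defaults are illustrative assumptions.

import numpy as np
from scipy import ndimage as nd

def elastic_indices(shape, alpha=34.0, sigma=4.0, rng=None):
    # Hypothetical helper: builds a (2, N) coordinate array for apply_elastic above.
    rng = np.random.default_rng() if rng is None else rng
    # Smoothed random displacement fields, scaled by alpha.
    dy = nd.gaussian_filter(rng.uniform(-1, 1, shape), sigma, mode='constant') * alpha
    dx = nd.gaussian_filter(rng.uniform(-1, 1, shape), sigma, mode='constant') * alpha
    y, x = np.meshgrid(np.arange(shape[0]), np.arange(shape[1]), indexing='ij')
    return np.vstack([(y + dy).ravel(), (x + dx).ravel()])

# Example usage: warped = apply_elastic(img, elastic_indices(img.shape))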
def _get_displacement_function(self, f):
    """
    Getter function for calculating the displacement.

    :param f: field that is used for the displacement, mainly velocity components
    :returns: function of the Taylor-expanded field (up to second order)
    """
    dx = self._distance
    f_x,  f_y  = np.gradient(f,   dx)
    f_xx, f_xy = np.gradient(f_x, dx)
    f_yx, f_yy = np.gradient(f_y, dx)
    return lambda i, j, x, y: (f[i, j] + x*f_x[i, j] + y*f_y[i, j]
                               + 0.5*(f_xx[i, j]*x**2 + 2*f_xy[i, j]*x*y + f_yy[i, j]*y**2))

# For the bilinear method the built-in scipy function `map_coordinates
# <https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.ndimage.interpolation.map_coordinates.html>`_
# is used with *order* set to 1.
def get_frame(self, i, j):
    """
    Perform interpolation to produce the deformed window for correlation.

    This function takes the previously set displacement and interpolates the
    image at these coordinates. If the cubic interpolation method is chosen,
    the cubic interpolation of this API is used. For the bilinear method the
    built-in scipy function `map_coordinates <https://goo.gl/wucmUO>`_ is used
    with *order* set to 1.

    :param int i: first index in grid coordinates
    :param int j: second index in grid coordinates
    :returns: interpolated window for the grid coordinates i, j and the image
        set in initialization
    """
    dws = self._shape[-1]
    offset_x, offset_y = np.mgrid[-dws/2+0.5:dws/2+0.5, -dws/2+0.5:dws/2+0.5]
    gx, gy = np.mgrid[0:dws, 0:dws]
    grid_x = gx + self._distance*i
    grid_y = gy + self._distance*j

    ptsax = (grid_x + self._u_disp(i, j, offset_x, offset_y)).ravel()
    ptsay = (grid_y + self._v_disp(i, j, offset_x, offset_y)).ravel()
    p, q = self._shape[-2:]

    if self._ipmethod == 'bilinear':
        return map_coordinates(self._frame, [ptsax, ptsay], order=1).reshape(p, q)
    if self._ipmethod == 'cubic':
        return self._cube_ip.interpolate(ptsax, ptsay).reshape(p, q)
def interpolate(p, axis_values, pixelgrid, order=1, mode='constant', cval=0.0):
    """
    Interpolates in a grid prepared by create_pixeltypegrid().

    p is an array of parameter arrays.

    @param p: Npar x Ninterpolate array
    @type p: array
    @return: Ndata x Ninterpolate array
    @rtype: array
    """
    # convert requested parameter combination into a coordinate
    p_ = np.array([np.searchsorted(av_, val) for av_, val in zip(axis_values, p)])
    lowervals_stepsize = np.array([[av_[p__-1], av_[p__]-av_[p__-1]]
                                   for av_, p__ in zip(axis_values, p_)])
    p_coord = (p - lowervals_stepsize[:, 0])/lowervals_stepsize[:, 1] + p_ - 1

    # interpolate
    if order > 1:
        prefilter = False
    else:
        prefilter = False
    return [ndimage.map_coordinates(pixelgrid[..., i], p_coord, order=order,
                                    prefilter=prefilter, mode=mode, cval=cval)
            for i in range(np.shape(pixelgrid)[-1])]
def optimize_center_line(self, image, deviation=5, samples=20):
    """Optimize center points separately"""
    pass
    #cl = self.center_line();
    #ts = self.normals();
    #for i in range(self.npoints):
    #    errors = np.zeros(samples);
    #    nd.map_coordinates(z, np.vstack((x,y)))
def intensity_along_curve(image, curve, order=3):
    """Returns intensity profile along a curve in an image

    Arguments:
        image (array): the grayscale image
        curve (nx2 array): points defining the curve
        order (int): interpolation order

    Returns:
        n array: intensity values along curve
    """
    return ndi.map_coordinates(image, curve.T[::-1], order=order)
def test():
    import numpy as np
    import matplotlib.pyplot as plt
    from importlib import reload  # reload() is no longer a builtin in Python 3
    import imageprocessing.smoothing as sth
    reload(sth)

    data = np.random.rand(10, 2)

    img = sth.smooth_data(data, bounds=[(0, 1), (0, 1)], sigma=None, nbins=100)
    imgs = sth.smooth_data(data, bounds=[(0, 1), (0, 1)], sigma=10, nbins=100)

    plt.figure(1)
    plt.clf()
    plt.subplot(1, 3, 1)
    plt.plot(data[:, 1], data[:, 0], '.')
    plt.xlim(0, 1)
    plt.ylim(0, 1)
    plt.gca().invert_yaxis()
    plt.subplot(1, 3, 2)
    plt.imshow(img)
    plt.subplot(1, 3, 3)
    plt.imshow(imgs)

    x, y = img.nonzero()
    print(x / 100.0)
    print(np.sort(data[:, 0]))

    curve = np.vstack([range(30, 70), [90 - i*1.5 for i in range(40)]]).T
    ic0 = sth.intensity_along_curve(imgs, curve)
    #cc = curve.T;
    #cc = cc[::-1];
    #cc[1] = 100-cc[1];
    #import scipy.ndimage as ndi
    #ic = ndi.map_coordinates(imgs, cc);

    plt.figure(2)
    plt.clf()
    plt.subplot(1, 2, 1)
    plt.imshow(imgs)
    plt.plot(curve[:, 0], curve[:, 1], 'r')
    #plt.plot(cc[0,:], cc[1,:])
    plt.subplot(1, 2, 2)
    plt.plot(ic0, 'r')
    #plt.plot(ic)
def radiance(ref, cosb, t_setx, height, r_setx, sang):
    if isinstance(t_setx, (int, float)):
        ttmp = [t_setx/dtau]
    else:
        ttmp = t_setx/dtau
    if isinstance(height, (int, float)):
        htmp = [height/dheight]
    else:
        htmp = height/dheight
    if isinstance(sang, (int, float)):
        stmp = [sang - smin]
    else:
        stmp = sang - smin

    path  = ndimage.map_coordinates(path_rad,  [ttmp, htmp, stmp])
    back  = ndimage.map_coordinates(back_rad,  [ttmp, htmp, stmp])
    pixel = ndimage.map_coordinates(pixel_rad, [ttmp, htmp, stmp])
    dir   = ndimage.map_coordinates(dir_irad,  [ttmp, htmp, stmp])
    sky   = ndimage.map_coordinates(sky_irad,  [ttmp, htmp, stmp])
    env   = ndimage.map_coordinates(env_irad,  [ttmp, htmp, stmp])
    sph   = ndimage.map_coordinates(sph_alb,   [ttmp, htmp, stmp])
    rayl  = ndimage.map_coordinates(tau_rayl,  [ttmp, htmp, stmp])
    aero  = ndimage.map_coordinates(tau_aero,  [ttmp, htmp, stmp])
    minor = ndimage.map_coordinates(tau_minor, [ttmp, htmp, stmp])

    dir = dir*cosb/cosb0
    #print dir
    back = back*(1 - r_set0*sph)*r_setx/(1 - r_setx*sph)/r_set0
    #print back
    env = env*(1 - r_set0*sph)*r_setx/(1 - r_setx*sph)/r_set0
    odep = rayl + aero + minor
    S = np.cos(np.pi*sang/180)
    rad = path + back + ref*(dir + sky + env)/np.exp(odep/S)/np.pi
    return rad
    #return np.pi*(rad-path-back)/(dir+sky+env)*np.exp(odep/S)  #5/25/2016
def apply_distortion_maps(self, arr, dispx, dispy):
    """
    Applies distortion maps generated from ElasticDistortionState
    """
    origarr = arr.copy()
    xx, yy = n.mgrid[0:dispx.shape[0], 0:dispx.shape[1]]
    xx = xx + dispx
    yy = yy + dispy
    coords = n.vstack([xx.flatten(), yy.flatten()])
    arr = ndimage.map_coordinates(origarr, coords, order=1, mode='nearest')
    return arr.reshape(origarr.shape)
def resample_volume(vol, r=None, t=None, ori=None, order=3, compat="mrc2014", indexing="ij"):
    if r is None and t is None:
        return vol.copy()

    center = np.array(vol.shape) // 2
    x, y, z = np.meshgrid(*[np.arange(-c, c) for c in center], indexing=indexing)
    xyz = np.vstack([x.reshape(-1), y.reshape(-1), z.reshape(-1), np.ones(x.size)])
    if ori is not None:
        xyz -= ori[:, None]

    th = np.eye(4)
    if t is None and r.shape[1] == 4:
        t = np.squeeze(r[:, 3])
    elif t is not None:
        th[:3, 3] = t

    rh = np.eye(4)
    if r is not None:
        rh[:3, :3] = r[:3, :3].T

    xyz = th.dot(rh.dot(xyz))[:3, :] + center[:, None]
    xyz = np.array([arr.reshape(vol.shape) for arr in xyz])
    if "relion" in compat.lower() or "xmipp" in compat.lower():
        xyz = xyz[::-1]

    newvol = map_coordinates(vol, xyz, order=order)
    return newvol
def sutherland_gaunt_factor(self, wavelength):
    r"""
    Calculates the free-free Gaunt factor using the calculations of [1]_.

    The Gaunt factors of [1]_ are read in using `ChiantiPy.tools.io.gffRead`
    as a function of :math:`u` and :math:`\gamma^2`. The data are interpolated
    to the appropriate wavelength and temperature values using
    `~scipy.ndimage.map_coordinates`.

    References
    ----------
    .. [1] Sutherland, R. S., 1998, MNRAS, `300, 321
           <http://adsabs.harvard.edu/abs/1998MNRAS.300..321S>`_
    """
    # calculate scaled quantities
    lower_u = ch_const.planck*(1.e8*ch_const.light)/ch_const.boltzmann/np.outer(self.Temperature, wavelength)
    gamma_squared = (self.Z**2)*ch_const.ryd2erg/ch_const.boltzmann/self.Temperature[:, np.newaxis]*np.ones(lower_u.shape)
    # convert to index coordinates
    i_lower_u = (np.log10(lower_u) + 4.)*10.
    i_gamma_squared = (np.log10(gamma_squared) + 4.)*5.
    # read in sutherland data
    gf_sutherland_data = ch_io.gffRead()
    # interpolate data to scaled quantities
    gf_sutherland = map_coordinates(gf_sutherland_data['gff'],
                                    [i_gamma_squared.flatten(), i_lower_u.flatten()]).reshape(lower_u.shape)

    return np.where(gf_sutherland < 0., 0., gf_sutherland)
def get_line_profile(self, array):
    """Retrieve line profile of data on a 2D array

    :param ndarray array: the data array
    """
    # Extract the values along the line, using cubic interpolation
    zi = map_coordinates(array, self.profile_coords)
    return zi
def imresize(img, sz):
    """
    Resize image

    Input:
        img: A grayscale image
        sz:  A tuple with the new size (rows, cols)

    Output:
        Ir: Resized image
    """
    if np.all(img.shape == sz):
        return img

    factors = (np.array(sz)).astype('f') / (np.array(img.shape)).astype('f')
    if np.any(factors < 1):
        # smooth before downsampling
        sigmas = (1.0/factors)/3.0
        #print img.shape, sz, sigmas
        I_filter = ndimage.filters.gaussian_filter(img, sigmas)
    else:
        I_filter = img

    u, v = np.meshgrid(np.arange(0, sz[1]).astype('f'), np.arange(0, sz[0]).astype('f'))
    # multiplicative factors mapping new coords -> old coords
    fx = float(img.shape[1]) / sz[1]
    fy = float(img.shape[0]) / sz[0]
    u *= fx
    u += (1.0/factors[1])/2 - 1 + 0.5  # sample from correct position
    v *= fy
    v += (1.0/factors[0])/2 - 1 + 0.5  # sample from correct position

    # bilinear interpolation
    Ir = ndimage.map_coordinates(I_filter,
                                 np.vstack((v.flatten().transpose(), u.flatten().transpose())),
                                 order=1, mode='nearest')
    Ir = np.reshape(Ir, (sz[0], sz[1]))
    return Ir
def warp_zeta(Iref, I, Grel, K, zeta):
    Xn = backproject(zeta.shape, K)
    X = np.vstack((Xn, 1/to_z(zeta.flatten())))  # homogeneous coordinate = inverse depth
    if Grel.shape[0] < 4:
        Grel = np.vstack((Grel, np.array([0, 0, 0, 1])))

    dX = np.dot(Grel[0:3, 0:3], Xn)*1/to_z(zeta.flatten())  # derivative of transformation G*X
    X2 = np.dot(Grel, X)        # transform point
    x2 = np.dot(K, X2[0:3, :])  # project to image plane
    x2[0, :] /= x2[2, :]        # dehomogenize
    x2[1, :] /= x2[2, :]

    Iw = ndimage.map_coordinates(I, np.vstack(np.flipud(x2[0:2, :])), order=1, cval=np.nan)
    Iw = np.reshape(Iw, I.shape)  # warped image
    gIwy, gIwx = np.gradient(Iw)

    fx = K[0, 0]
    fy = K[1, 1]
    for i in range(0, 3):  # dehomogenize
        X2[i, :] /= X2[3, :]
    z2 = X2[2, :]**2
    dT = np.zeros((2, X2.shape[1]))
    # derivative of projected point x2 = pi(G*X)
    dT[0, :] = fx/X2[2, :]*dX[0, :] - fx*X2[0, :]/z2*dX[2, :]
    dT[1, :] = fy/X2[2, :]*dX[1, :] - fy*X2[1, :]/z2*dX[2, :]

    # full derivative I(T(x,z)), T(x,z) = pi(G*X)
    Ig = np.reshape(gIwx.flatten()*dT[0, :] + gIwy.flatten()*dT[1, :], zeta.shape)
    It = Iw - Iref  # 'time' derivative
    It[np.where(np.isnan(Iw))] = 0
    Ig[np.where((np.isnan(gIwx).astype('uint8') + np.isnan(gIwy).astype('uint8')) > 0)] = 0
    return Iw, It, Ig
def interpolationFunction(self, data=None, spline_order=3, cval=None):
    """Returns a function W(x,y) that interpolates any values on the PMF.

    W = interpolationFunction(self, data=None, spline_order=3, cval=None)

    cval is set to data.max() by default (works for the PMF) but for the
    probability this should be 0 or data.min(). cval cannot be chosen too
    large or too small or NaN because otherwise the spline interpolation
    breaks down near the region and produces wild oscillations.
    """
    # see http://www.scipy.org/Cookbook/Interpolation
    from scipy import ndimage

    if data is None:
        data = self.free_energy
    if cval is None:
        cval = data.max()
    _data = data.filled(cval)  # fill with max, not 1000, to keep spline happy

    coeffs = ndimage.spline_filter(_data, order=spline_order)
    x0, y0 = self.X[0], self.Y[0]
    dx, dy = self.X[1] - x0, self.Y[1] - y0

    def _transform(cnew, c0, dc):
        return (numpy.atleast_1d(cnew) - c0)/dc

    def interpolatedF(NMP, LID):
        """B-spline function over the PMF, W(NMP,LID).

        Example usage:
        >>> XX, YY = numpy.mgrid[40:75:0.5, 96:150:0.5]
        >>> FF = FreeEnergy.W(XX, YY)
        >>> contourf(XX, YY, FF, numpy.linspace(-3, 11, 100), extend='both')
        """
        return ndimage.map_coordinates(coeffs,
                                       numpy.array([_transform(NMP, x0, dx),
                                                    _transform(LID, y0, dy)]),
                                       prefilter=False, mode='constant', cval=cval)

    return interpolatedF
def interpolationFunction(self, data=None, spline_order=3, cval=None):
    """Returns a function F(x,y) that interpolates any values of the 2D observable.

    F = interpolationFunction(self, data=None, spline_order=3, cval=None)

    cval is set to data.mean() by default. cval cannot be chosen too large or
    too small or NaN because otherwise the spline interpolation breaks down
    near the region and produces wild oscillations.
    """
    # see http://www.scipy.org/Cookbook/Interpolation
    from scipy import ndimage

    if data is None:
        data = self.observable
    if cval is None:
        cval = data.mean()
    _data = data.filled(cval)  # fill with moderate value, not 1000, to keep spline happy

    coeffs = ndimage.spline_filter(_data, order=spline_order)
    x0, y0 = self.mNMP[0], self.mLID[0]
    dx, dy = self.mNMP[1] - x0, self.mLID[1] - y0

    def _transform(cnew, c0, dc):
        return (numpy.atleast_1d(cnew) - c0)/dc

    def interpolatedF(NMP, LID):
        """B-spline function over the 2D observable, F(NMP,LID).

        Example usage:
        >>> XX, YY = numpy.mgrid[40:75:0.5, 96:150:0.5]
        >>> FF = Observable.F(XX, YY)
        >>> contourf(XX, YY, FF, numpy.linspace(-3, 11, 100), extend='both')
        """
        return ndimage.map_coordinates(coeffs,
                                       numpy.array([_transform(NMP, x0, dx),
                                                    _transform(LID, y0, dy)]),
                                       prefilter=False, mode='constant', cval=cval)

    return interpolatedF
def cpimage(self, fitsdata, save_totalflux=False, order=3):
    '''
    Copy the first image into the image grid specified by the secondary input image.

    Arguments:
        fitsdata: input imagefite.imagefits object. This image will be copied into self.
        self: input imagefite.imagefits object specifying the image grid where the
              orgfits data will be copied.
        save_totalflux (boolean): If true, the total flux of the image will be conserved.
    '''
    # generate output imfits object
    outfits = copy.deepcopy(self)

    dx0 = fitsdata.header["dx"]
    dy0 = fitsdata.header["dy"]
    Nx0 = fitsdata.header["nx"]
    Ny0 = fitsdata.header["ny"]
    Nxr0 = fitsdata.header["nxref"]
    Nyr0 = fitsdata.header["nyref"]

    dx1 = outfits.header["dx"]
    dy1 = outfits.header["dy"]
    Nx1 = outfits.header["nx"]
    Ny1 = outfits.header["ny"]
    Nxr1 = outfits.header["nxref"]
    Nyr1 = outfits.header["nyref"]

    coord = np.zeros([2, Nx1 * Ny1])
    xgrid = (np.arange(Nx1) + 1 - Nxr1) * dx1 / dx0 + Nxr0 - 1
    ygrid = (np.arange(Ny1) + 1 - Nyr1) * dy1 / dy0 + Nyr0 - 1
    x, y = np.meshgrid(xgrid, ygrid)
    coord[0, :] = y.reshape(Nx1 * Ny1)
    coord[1, :] = x.reshape(Nx1 * Ny1)

    for idxs in np.arange(outfits.header["ns"]):
        for idxf in np.arange(outfits.header["nf"]):
            outfits.data[idxs, idxf] = sn.map_coordinates(
                fitsdata.data[idxs, idxf], coord, order=order,
                mode='constant', cval=0.0, prefilter=True).reshape([Ny1, Nx1]) \
                * dx1 * dy1 / dx0 / dy0

            # Flux Scaling
            if save_totalflux:
                totalflux = fitsdata.totalflux(istokes=idxs, ifreq=idxf)
                outfits.data[idxs, idxf] *= totalflux / outfits.totalflux(istokes=idxs, ifreq=idxf)

    outfits.update_fits()
    return outfits