Python scipy.fftpack module: fftshift() example source code

The following 11 code examples, extracted from open-source Python projects, illustrate how to use scipy.fftpack.fftshift().
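
For orientation: fftshift() reorders FFT output so that the zero-frequency component sits in the middle of the array (ifftshift() undoes this). A minimal sketch, with an arbitrary length and sample spacing chosen only for illustration:

import numpy as np
from scipy import fftpack

# 8 samples at 10 Hz -> frequency bins as returned by fftfreq()
freqs = fftpack.fftfreq(8, d=0.1)       # [ 0.    1.25  2.5   3.75 -5.   -3.75 -2.5  -1.25]
print(fftpack.fftshift(freqs))          # [-5.   -3.75 -2.5  -1.25  0.    1.25  2.5   3.75]

# The spectrum of a signal is centred the same way
x = np.random.default_rng(0).standard_normal(8)
spectrum = fftpack.fftshift(fftpack.fft(x))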

Project: atoolbox    Author: liweitianux    | project source | file source
def calc_ps3d(self):
        """
        Calculate the 3D power spectrum of the image cube.

        The power spectrum is properly normalized to have dimension
        of [K^2 Mpc^3].
        """
        if self.window is not None:
            logger.info("Applying window along frequency axis ...")
            self.cube *= self.window[:, np.newaxis, np.newaxis]

        logger.info("3D FFTing data cube ...")
        cubefft = fftpack.fftshift(fftpack.fftn(self.cube))

        logger.info("Calculating 3D power spectrum ...")
        ps3d = np.abs(cubefft) ** 2  # [K^2]
        # Normalization
        norm1 = 1 / (self.Nx * self.Ny * self.Nz)
        norm2 = 1 / (self.fs_xy**2 * self.fs_z)  # [Mpc^3]
        norm3 = 1 / (2*np.pi)**3
        self.ps3d = ps3d * norm1 * norm2 * norm3  # [K^2 Mpc^3]
        return self.ps3d
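
A self-contained sketch of the same pattern on a toy cube (hypothetical dimensions; the physical norm2/norm3 factors above depend on the sampling intervals fs_xy/fs_z defined elsewhere in the class and are only referenced in a comment here):

import numpy as np
from scipy import fftpack

cube = np.random.default_rng(1).standard_normal((16, 32, 32))   # (Nz, Ny, Nx)

cubefft = fftpack.fftshift(fftpack.fftn(cube))
ps3d = np.abs(cubefft) ** 2 / cube.size     # |FFT|^2 with the norm1 = 1/(Nx*Ny*Nz) factor
# calc_ps3d() additionally applies norm2 and norm3 to obtain [K^2 Mpc^3].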
Project: atoolbox    Author: liweitianux    | project source | file source
def k_xy(self):
        """
        The k-space coordinates along the (X, Y) spatial dimensions,
        which describe the spatial frequencies.

        NOTE:
        k = 2*pi * f, where "f" is the spatial frequency, i.e. the
        Fourier dual of the transverse spatial distances x/y.

        Unit: [Mpc^-1]
        """
        f_xy = fftpack.fftshift(fftpack.fftfreq(self.Nx, d=self.d_xy))
        k_xy = 2*np.pi * f_xy
        return k_xy
Project: atoolbox    Author: liweitianux    | project source | file source
def k_z(self):
        """
        The k-space coordinates along the line-of-sight (Z / frequency)
        dimension, defined in the same way as k_xy.

        Unit: [Mpc^-1]
        """
        f_z = fftpack.fftshift(fftpack.fftfreq(self.Nz, d=self.d_z))
        k_z = 2*np.pi * f_z
        return k_z
Project: atoolbox    Author: liweitianux    | project source | file source
def k_perp(self):
        """
        Comoving wavenumbers perpendicular to the LoS

        NOTE: When the length is even, the Nyquist frequency sits at the
              first element after fftshift, and it is negative.
        """
        k_x = self.k_xy
        return k_x[k_x >= 0]
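
The NOTE above is easy to verify: for an even length the shifted frequency array starts with the (negative) Nyquist bin, which the k_x >= 0 mask then drops. A quick check with an arbitrary length of 8:

import numpy as np
from scipy import fftpack

f = fftpack.fftshift(fftpack.fftfreq(8))
print(f)            # [-0.5   -0.375 -0.25  -0.125  0.     0.125  0.25   0.375]
k = 2*np.pi * f
print(k[k >= 0])    # only the non-negative wavenumbers remain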
Project: VLTPF    Author: avigan    | project source | file source
def _shift_fft(array, shift_value):
    Ndim  = array.ndim
    dims  = array.shape
    dtype = array.dtype.kind

    if (dtype != 'f'):
        raise ValueError('Array must be float')

    shifted = array
    if (Ndim == 1):
        Nx = dims[0]

        x_ramp = np.arange(Nx, dtype=array.dtype) - Nx//2

        tilt = (2*np.pi/Nx) * (shift_value[0]*x_ramp)

        cplx_tilt = np.cos(tilt) + 1j*np.sin(tilt)
        cplx_tilt = fft.fftshift(cplx_tilt)
        narray    = fft.fft(fft.ifft(array) * cplx_tilt)
        shifted   = narray.real
    elif (Ndim == 2):
        Nx = dims[0]
        Ny = dims[1]

        x_ramp = np.outer(np.full(Nx, 1.), np.arange(Ny, dtype=array.dtype)) - Nx//2
        y_ramp = np.outer(np.arange(Nx, dtype=array.dtype), np.full(Ny, 1.)) - Ny//2

        tilt = (2*np.pi/Nx) * (shift_value[0]*x_ramp+shift_value[1]*y_ramp)

        cplx_tilt = np.cos(tilt) + 1j*np.sin(tilt)        
        cplx_tilt = fft.fftshift(cplx_tilt)

        narray    = fft.fft2(fft.ifft2(array) * cplx_tilt)
        shifted   = narray.real
    else:
        raise ValueError('This function can shift only 1D or 2D arrays')

    return shifted
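
A hedged usage sketch of the helper above, assuming the surrounding module imports numpy as np and scipy.fftpack as fft (which the fft.fftshift / fft.fft calls suggest). Note that shift_value is indexed, so a sequence is expected even for 1D input, and the 2D branch as written effectively assumes a square array (Nx == Ny):

import numpy as np

# Sub-pixel shift of a 1D Gaussian bump by 2.5 samples via the Fourier phase ramp
x = np.arange(64, dtype=float)
signal = np.exp(-0.5 * ((x - 32) / 3.0) ** 2)
shifted = _shift_fft(signal, [2.5])
print(np.argmax(signal), np.argmax(shifted))    # the peak is displaced by roughly 2.5 samples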
Project: yam    Author: trichter    | project source | file source
def spectral_whitening(data, sr=None, smooth=None, filter=None,
                       waterlevel=1e-8):
    """
    Apply spectral whitening to data

    Data is divided by its (optionally smoothed) amplitude spectrum.

    :param data: numpy array with data to manipulate
    :param sr: sampling rate (needed for smoothing and filtering)
    :param smooth: length of smoothing window in Hz
        (default None -> no smoothing)
    :param filter: filter spectrum with bandpass after whitening
        (tuple with min and max frequency)
    :param waterlevel: waterlevel relative to mean of spectrum

    :return: whitened data
    """
    mask = np.ma.getmask(data)
    N = len(data)
    nfft = next_fast_len(N)
    spec = fft(data, nfft)
    spec_ampl = np.sqrt(np.abs(np.multiply(spec, np.conjugate(spec))))
    if smooth:
        smooth = int(smooth * N / sr)
        spec_ampl = ifftshift(smooth_func(fftshift(spec_ampl), smooth))
    # safeguard against division by 0
    wl = waterlevel * np.mean(spec_ampl)
    spec_ampl[spec_ampl < wl] = wl
    spec /= spec_ampl
    if filter is not None:
        spec *= _filter_resp(*filter, sr=sr, N=len(spec), whole=True)[1]
    ret = np.real(ifft(spec, nfft)[:N])
    return _fill_array(ret, mask=mask, fill_value=0.)
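
The essential step is dividing the spectrum by its (optionally smoothed) amplitude, with a waterlevel so that near-zero bins do not explode; fftshift/ifftshift only ensure that the smoothing window does not wrap across the jump between positive and negative frequencies. A stripped-down sketch without the project's helpers (smooth_func, _filter_resp, _fill_array):

import numpy as np
from scipy.fftpack import fft, ifft

def whiten_simple(data, waterlevel=1e-8):
    """Divide data by its amplitude spectrum (no smoothing, no filtering)."""
    spec = fft(data)
    spec_ampl = np.abs(spec)
    wl = waterlevel * np.mean(spec_ampl)
    spec_ampl[spec_ampl < wl] = wl      # safeguard against division by 0
    return np.real(ifft(spec / spec_ampl))

whitened = whiten_simple(np.random.default_rng(2).standard_normal(1024))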
Project: ImageTransformer    Author: ssingal05    | project source | file source
def fourier(self):
        F1 = fftpack.fft2(self.image)
        F2 = fftpack.fftshift(F1)
        return F2
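
Centring the DC component with fftshift() is what makes a 2D spectrum readable when displayed. A short sketch of how the returned array might be visualised (matplotlib is an assumption here, not part of the project):

import numpy as np
from scipy import fftpack
import matplotlib.pyplot as plt

image = np.random.default_rng(3).standard_normal((128, 128))
F2 = fftpack.fftshift(fftpack.fft2(image))

plt.imshow(np.log10(np.abs(F2) + 1e-12), cmap='gray')   # log magnitude tames the dynamic range
plt.title('Centred 2D spectrum (log magnitude)')
plt.show()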
Project: empymod    Author: empymod    | project source | file source
def fft(fEM, time, freq, ftarg):
    """Fourier Transform using the Fast Fourier Transform.

    The function is called from one of the modelling routines in :mod:`model`.
    Consult these modelling routines for a description of the input and output
    parameters.

    Returns
    -------
    tEM : array
        Returns time-domain EM response of `fEM` for given `time`.

    conv : bool
        Only relevant for QWE/QUAD.

    """
    # Get ftarg values
    dfreq, nfreq, ntot, pts_per_dec = ftarg

    # If pts_per_dec, we first have to interpolate fEM to the required freqs
    if pts_per_dec:
        sfEMr = iuSpline(np.log10(freq), fEM.real)
        sfEMi = iuSpline(np.log10(freq), fEM.imag)
        freq = np.arange(1, nfreq+1)*dfreq
        fEM = sfEMr(np.log10(freq)) + 1j*sfEMi(np.log10(freq))

    # Pad the frequency result
    fEM = np.pad(fEM, (0, ntot-nfreq), 'linear_ramp')

    # Carry out FFT
    ifftEM = fftpack.ifft(np.r_[fEM[1:], 0, fEM[::-1].conj()]).real
    stEM = 2*ntot*fftpack.fftshift(ifftEM*dfreq, 0)

    # Interpolate in time domain
    dt = 1/(2*ntot*dfreq)
    ifEM = iuSpline(np.linspace(-ntot, ntot-1, 2*ntot)*dt, stEM)
    tEM = ifEM(time)/2*np.pi  # (Multiplication of 2/pi in model.tem)

    # Return the electromagnetic time domain field
    # (Second argument is only for QWE)
    return tEM, True


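The fftshift() call above centres the time axis of the inverse transform: the padded spectrum is extended with its reversed complex conjugate, the real part of the inverse FFT is kept, and fftshift moves t = 0 to the middle so it lines up with the symmetric time axis built from np.linspace(-ntot, ntot-1, 2*ntot)*dt. A toy version of just that construction (sizes and data invented for the example):

import numpy as np
from scipy import fftpack

ntot, dfreq = 8, 0.5                                 # hypothetical sizes
fEM = np.exp(-np.arange(1, ntot + 1) * dfreq) + 0j   # toy frequency-domain data

full = np.r_[fEM[1:], 0, fEM[::-1].conj()]           # extend with the reversed conjugate
ifftEM = fftpack.ifft(full).real
stEM = 2*ntot*fftpack.fftshift(ifftEM*dfreq, 0)

dt = 1/(2*ntot*dfreq)
t = np.linspace(-ntot, ntot-1, 2*ntot)*dt            # time axis centred on zero
print(t.shape, stEM.shape)                           # (16,) (16,)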
Project: Speaker-Recognition    Author: orchidas    | project source | file source
def mfcc(s,fs, nfiltbank):

    #divide into segments of 25 ms with overlap of 10ms
    nSamples = np.int32(0.025*fs)
    overlap = np.int32(0.01*fs)
    nFrames = np.int32(np.ceil(len(s)/(nSamples-overlap)))
    #zero padding to make signal length long enough to have nFrames
    padding = ((nSamples-overlap)*nFrames + overlap) - len(s)  # + overlap so the last frame fits
    if padding > 0:
        signal = np.append(s, np.zeros(padding))
    else:
        signal = s
    segment = np.empty((nSamples, nFrames))
    start = 0
    for i in range(nFrames):
        segment[:,i] = signal[start:start+nSamples]
        start = (nSamples-overlap)*(i+1)   # hop to the next frame start

    #compute periodogram
    nfft = 512
    periodogram = np.empty((nFrames, nfft//2 + 1))
    for i in range(nFrames):
        x = segment[:,i] * hamming(nSamples)
        spectrum = fftshift(fft(x,nfft))
        periodogram[i,:] = abs(spectrum[nfft//2 - 1:])/nSamples

    #calculating mfccs    
    fbank = mel_filterbank(nfft, nfiltbank, fs)
    #nfiltbank MFCCs for each frame
    mel_coeff = np.empty((nfiltbank,nFrames))
    for i in range(nfiltbank):
        for k in range(nFrames):
            mel_coeff[i,k] = np.sum(periodogram[k,:]*fbank[:,i])

    mel_coeff = np.log10(mel_coeff)
    mel_coeff = dct(mel_coeff)
    #exclude 0th order coefficient (much larger than others)
    mel_coeff[0,:]= np.zeros(nFrames)
    return mel_coeff
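
The slice spectrum[nfft//2 - 1:] relies on the layout produced by fftshift: for an even nfft the shifted spectrum runs from the negative Nyquist bin up to the last positive bin, so the last nfft//2 + 1 entries cover one bin below DC through the positive half. A quick self-contained check with a small nfft:

import numpy as np
from scipy.fftpack import fft, fftshift, fftfreq

nfft = 8
x = np.random.default_rng(4).standard_normal(nfft)
spectrum = fftshift(fft(x, nfft))
freqs = fftshift(fftfreq(nfft))

half = spectrum[nfft//2 - 1:]           # nfft//2 + 1 bins
print(freqs[nfft//2 - 1:])              # [-0.125  0.     0.125  0.25   0.375]
print(half.shape)                       # (5,)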
Project: pyNMR    Author: bennomeier    | project source | file source
def fourierTransform(self, fromPos, toPos, only = []):
        self.checkToPos(toPos)
        if len(only) > 0:
            self.allFid[toPos] = np.array([fftshift(fft(self.allFid[fromPos][fidIndex])) for fidIndex in only])
        else:
            self.allFid[toPos] = np.array([fftshift(fft(fid)) for fid in self.allFid[fromPos]])

        self.frequency = np.linspace(-self.sweepWidthTD2/2,self.sweepWidthTD2/2,len(self.allFid[fromPos][0]))
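
The frequency axis here is built with np.linspace; an alternative that lands exactly on the FFT bin centres is fftshift(fftfreq(N, d=dwellTime)) with dwellTime = 1/sweepWidth. A short sketch with invented numbers:

import numpy as np
from scipy.fftpack import fft, fftshift, fftfreq

sweepWidth = 10000.0                                   # Hz, hypothetical spectral width
N = 1024                                               # points per FID, hypothetical
fid = np.random.default_rng(5).standard_normal(N) + 0j

spectrum = fftshift(fft(fid))
frequency = fftshift(fftfreq(N, d=1.0/sweepWidth))     # exact bin centres
print(frequency[0], frequency[-1])                     # -sweepWidth/2 ... just below +sweepWidth/2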
Project: Speaker-Recognition    Author: orchidas    | project source | file source
def stft(signal, fs, nfft, overlap):

    #plotting time domain signal
    plt.figure(1)
    t = np.arange(0,len(signal)/fs, 1/fs)
    plt.plot(t,signal)
    plt.axis(xmax = 1)
    plt.xlabel('Time in seconds')
    plt.ylabel('Amplitude')
    plt.title('Speech signal')

    if not np.log2(nfft).is_integer():
        nfft = nearestPow2(nfft)
    slength = len(signal)
    hop_size = np.int32(overlap * nfft)
    nFrames = int(np.round(len(signal)/(nfft-hop_size)))
    #zero padding to make signal length long enough to have nFrames
    signal = np.append(signal, np.zeros(nfft))
    STFT = np.empty((nfft, nFrames))
    segment = np.zeros(nfft)
    start = 0

    for n in range(nFrames):
        segment = signal[start:start+nfft] * hann(nfft) 
        padded_seg = np.append(segment,np.zeros(nfft))
        spec = fftshift(fft(padded_seg))
        spec = spec[len(spec)//2:]
        spec = abs(spec)/max(abs(spec))
        powerspec = 20*np.log10(spec)
        STFT[:,n] = powerspec
        start = start + nfft - hop_size 

    #plot spectrogram
    plt.figure(2)
    freq = (fs/(2*nfft)) * np.arange(0,nfft,1)
    time = np.arange(0,nFrames)*(slength/(fs*nFrames))
    plt.imshow(STFT, extent = [0,max(time),0,max(freq)],origin='lower', cmap='jet', interpolation='nearest', aspect='auto')
    plt.ylabel('Frequency in Hz')
    plt.xlabel('Time in seconds')
    plt.axis([0,max(time),0,np.max(freq)])
    plt.title('Spectrogram of speech')
    plt.show()
    return (STFT, time, freq)
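
A hedged usage sketch, assuming the module-level dependencies of stft() are in scope: matplotlib.pyplot as plt, hann from scipy.signal (scipy.signal.windows in newer SciPy), and the project's nearestPow2 helper (not actually needed here because 512 is already a power of two):

import numpy as np

fs = 8000
t = np.arange(0, 1.0, 1.0/fs)
signal = np.sin(2*np.pi*440*t)                  # 1 s of a 440 Hz test tone

STFT, times, freqs = stft(signal, fs, nfft=512, overlap=0.5)
print(STFT.shape)                               # (512, 31) for these numbers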