The following 50 code examples, extracted from open-source Python projects, illustrate how to use numpy.conj().
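Before the project examples, here is a minimal sketch of the API itself (my own illustration, not taken from any of the projects below): np.conj(), also available as np.conjugate() and as the ndarray method .conj(), flips the sign of the imaginary part elementwise, and z * np.conj(z) yields the squared magnitude |z|**2 as a complex number with zero imaginary part — a pattern that recurs throughout the examples.

import numpy as np

z = np.array([1 + 2j, 3 - 4j])
print(np.conj(z))       # [1.-2.j 3.+4.j]
print(z * np.conj(z))   # [ 5.+0.j 25.+0.j], i.e. |z|**2 with zero imaginary part
print(z.conj())         # equivalent ndarray method, used in several examples below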
def _ncc_c(x, y):
    """
    >>> _ncc_c([1,2,3,4], [1,2,3,4])
    array([ 0.13333333,  0.36666667,  0.66666667,  1.        ,  0.66666667,
            0.36666667,  0.13333333])
    >>> _ncc_c([1,1,1], [1,1,1])
    array([ 0.33333333,  0.66666667,  1.        ,  0.66666667,  0.33333333])
    >>> _ncc_c([1,2,3], [-1,-1,-1])
    array([-0.15430335, -0.46291005, -0.9258201 , -0.77151675, -0.46291005])
    """
    # norm, fft and ifft come from numpy.linalg and numpy.fft in the source module
    den = np.array(norm(x) * norm(y))
    den[den == 0] = np.inf

    x_len = len(x)
    fft_size = 1 << (2 * x_len - 1).bit_length()
    cc = ifft(fft(x, fft_size) * np.conj(fft(y, fft_size)))
    cc = np.concatenate((cc[-(x_len - 1):], cc[:x_len]))
    return np.real(cc) / den
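_ncc_c relies on the cross-correlation theorem: an inverse FFT of FFT(x) times the conjugated FFT(y), zero-padded to at least 2*len(x) - 1 samples, reproduces the full linear cross-correlation. A self-contained check of that identity against np.correlate (my addition, not part of the original project):

import numpy as np

x = np.array([1.0, 2.0, 3.0, 4.0])
y = np.array([1.0, 2.0, 3.0, 4.0])
n = len(x)
fft_size = 1 << (2 * n - 1).bit_length()
cc = np.fft.ifft(np.fft.fft(x, fft_size) * np.conj(np.fft.fft(y, fft_size)))
cc = np.concatenate((cc[-(n - 1):], cc[:n])).real
# same values as the unnormalized doctest output above: [4, 11, 20, 30, 20, 11, 4]
assert np.allclose(cc, np.correlate(x, y, mode='full'))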
def branch_power(self):
    """
    "branch_power" computes the branch power (passive sign convention)

    :return:
        * self.pb: real power for '.op' and complex power for '.ac'
    """
    # check if branch voltages are available
    if self.vb is None:
        self.branch_voltage()

    # check if branch currents are available
    if self.ib is None:
        self.branch_current()

    if self.analysis[0].lower() == '.ac':
        # complex power S = V * conj(I)
        self.pb = self.vb * np.conj(self.ib)
    else:
        self.pb = self.vb * self.ib
def mandelbrot(h, w, maxit):
    """
    Returns an image of the Mandelbrot fractal of size (h,w).
    """
    y, x = np.ogrid[-1.4:1.4:h * 1j, -2:0.8:w * 1j]
    c = x + y * 1j
    z = c
    divtime = maxit + np.zeros(z.shape, dtype=int)

    for i in range(maxit):
        z = z ** 2 + c
        # |z|**2 = z * conj(z); a point has diverged once |z| > 2
        diverge = z * np.conj(z) > 2 ** 2
        div_now = diverge & (divtime == maxit)
        divtime[div_now] = i + 100
        z[diverge] = 2  # avoid overflow in further iterations

        # logger and recorder come from the enclosing module
        logger.debug("Updating divtime")
        recorder.record('divtime', divtime)

    return divtime
def nufft_scale1(N, K, alpha, beta, Nmid):
    '''
    calculate image space scaling factor
    '''
    # import types
    # if alpha is types.ComplexType:
    #     alpha = numpy.real(alpha)
    #     print('complex alpha may not work, but I just let it as')

    L = len(alpha) - 1
    if L > 0:
        sn = numpy.zeros((N, 1))
        n = numpy.arange(0, N).reshape((N, 1), order='F')
        i_gam_n_n0 = 1j * (2 * numpy.pi / K) * (n - Nmid) * beta
        for l1 in range(-L, L + 1):
            alf = alpha[abs(l1)]
            if l1 < 0:
                alf = numpy.conj(alf)
            sn = sn + alf * numpy.exp(i_gam_n_n0 * l1)
    else:
        sn = numpy.dot(alpha, numpy.ones((N, 1), dtype=numpy.float32))
    return sn
def nufft_T(N, J, K, alpha, beta):
    '''
    Equations (29) and (26) of Fessler's paper:
    create the overlapping matrix CSSC (a diagonally dominant matrix)
    of J points, then find the pseudo-inverse of CSSC.
    '''
    # import scipy.linalg
    L = numpy.size(alpha) - 1
    # print('L = ', L, 'J = ', J, 'a b', alpha, beta)
    cssc = numpy.zeros((J, J))
    [j1, j2] = numpy.mgrid[1:J + 1, 1:J + 1]
    overlapping_mat = j2 - j1
    for l1 in range(-L, L + 1):
        for l2 in range(-L, L + 1):
            alf1 = alpha[abs(l1)]
            # if l1 < 0: alf1 = numpy.conj(alf1)
            alf2 = alpha[abs(l2)]
            # if l2 < 0: alf2 = numpy.conj(alf2)
            tmp = overlapping_mat + beta * (l1 - l2)
            tmp = dirichlet(1.0 * tmp / (1.0 * K / N))
            cssc = cssc + alf1 * numpy.conj(alf2) * tmp

    return mat_inv(cssc)
def precompute(self):
    # CSR_W = cuda_cffi.cusparse.CSR.to_CSR(self.st['W_gpu'], diag_type=True)
    # Dia_W_cpu = scipy.sparse.dia_matrix((self.st['M'], self.st['M']), dtype=dtype)
    # Dia_W_cpu = scipy.sparse.dia_matrix((self.st['W'], 0), shape=(self.st['M'], self.st['M']))
    # Dia_W_cpu = scipy.sparse.diags(self.st['W'], format="csr", dtype=dtype)
    # CSR_W = cuda_cffi.cusparse.CSR.to_CSR(Dia_W_cpu)

    self.st['pHp_gpu'] = self.CSRH.gemm(self.CSR)
    self.st['pHp'] = self.st['pHp_gpu'].get()
    print('untrimmed', self.st['pHp'].nnz)
    self.truncate_selfadjoint(1e-5)
    print('trimmed', self.st['pHp'].nnz)
    self.st['pHp_gpu'] = cuda_cffi.cusparse.CSR.to_CSR(self.st['pHp'])

    # self.st['pHWp_gpu'] = self.CSR.conj().gemm(CSR_W, transA=cuda_cffi.cusparse.CUSPARSE_OPERATION_TRANSPOSE)
    # self.st['pHWp_gpu'] = self.st['pHWp_gpu'].gemm(self.CSR, transA=cuda_cffi.cusparse.CUSPARSE_OPERATION_NON_TRANSPOSE)
def nufft_scale1(N, K, alpha, beta, Nmid):
    '''
    Calculate image space scaling factor
    '''
    # import types
    # if alpha is types.ComplexType:
    #     alpha = numpy.real(alpha)
    #     print('complex alpha may not work, but I just let it as')

    L = len(alpha) - 1
    if L > 0:
        sn = numpy.zeros((N, 1))
        n = numpy.arange(0, N).reshape((N, 1), order='F')
        i_gam_n_n0 = 1j * (2 * numpy.pi / K) * (n - Nmid) * beta
        for l1 in range(-L, L + 1):
            alf = alpha[abs(l1)]
            if l1 < 0:
                alf = numpy.conj(alf)
            sn = sn + alf * numpy.exp(i_gam_n_n0 * l1)
    else:
        sn = numpy.dot(alpha, numpy.ones((N, 1)))
    return sn
def nufft_T(N, J, K, alpha, beta):
    '''
    Equations (29) and (26) in Fessler and Sutton 2003:
    create the overlapping matrix CSSC (a diagonally dominant matrix)
    of J points and find the pseudo-inverse of CSSC.
    '''
    # import scipy.linalg
    L = numpy.size(alpha) - 1
    # print('L = ', L, 'J = ', J, 'a b', alpha, beta)
    cssc = numpy.zeros((J, J))
    [j1, j2] = numpy.mgrid[1:J + 1, 1:J + 1]
    overlapping_mat = j2 - j1
    for l1 in range(-L, L + 1):
        for l2 in range(-L, L + 1):
            alf1 = alpha[abs(l1)]
            # if l1 < 0: alf1 = numpy.conj(alf1)
            alf2 = alpha[abs(l2)]
            # if l2 < 0: alf2 = numpy.conj(alf2)
            tmp = overlapping_mat + beta * (l1 - l2)
            tmp = dirichlet(1.0 * tmp / (1.0 * K / N))
            cssc = cssc + alf1 * alf2 * tmp

    return mat_inv(cssc)
def _evaluate_windows(self, window_a, window_b):
    """
    Calculate the FFT of both windows, correlate and transform back.
    To decrease the error, the mean is subtracted from each window.
    To compensate for the indexing during the FFT, an FFT shift is performed.

    :param window_a: interrogation window
    :param window_b: search window
    :returns: correlation window
    """
    fft_a = self._fa_fft(window_a - np.mean(window_a))
    fft_b = self._fb_fft(window_b - np.mean(window_b))
    # correlation in the frequency domain: one spectrum is conjugated
    fft_corr = fft_a * np.conj(fft_b)
    inv_fft = self._ift_fft(fft_corr)
    return np.fft.fftshift(inv_fft)
def _random_op(sites, ldim, hermitian=False, normalized=False, randstate=None,
               dtype=np.complex_):
    """Returns a random operator of shape (ldim, ldim) * sites with local
    dimension `ldim` living on `sites` sites in global form.

    :param sites: Number of local sites
    :param ldim: Local dimension
    :param hermitian: Return only the Hermitian part (default False)
    :param normalized: Normalize to Frobenius norm == 1 (default False)
    :param randstate: numpy.random.RandomState instance or None
    :returns: numpy.ndarray of shape (ldim, ldim) * sites

    >>> A = _random_op(3, 2); A.shape
    (2, 2, 2, 2, 2, 2)
    """
    op = _randfuncs[dtype]((ldim**sites,) * 2, randstate=randstate)
    if hermitian:
        op += np.transpose(op).conj()
    if normalized:
        op /= np.linalg.norm(op)
    return op.reshape((ldim,) * 2 * sites)
def ccorr(a, b):
    """
    Circular correlation of vectors.

    Computes the circular correlation of two vectors a and b via their
    fast Fourier transforms:

        a \\ast b = \\mathcal{F}^{-1}(\\overline{\\mathcal{F}(a)} \\odot \\mathcal{F}(b))

    Parameters
    ----------
    a : real-valued array (shape N)
    b : real-valued array (shape N)

    Returns
    -------
    c : real-valued array (shape N), representing the circular
        correlation of a and b
    """
    return ifft(np.conj(fft(a)) * fft(b)).real
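As a sanity check (my addition, not from the source project), the FFT identity in the docstring can be verified against the direct O(N^2) definition c[i] = sum_k a[k] * b[(k + i) mod N]:

import numpy as np
from numpy.fft import fft, ifft

rng = np.random.default_rng(0)
a = rng.standard_normal(8)
b = rng.standard_normal(8)
fast = ifft(np.conj(fft(a)) * fft(b)).real
# np.roll(b, -i)[k] == b[(k + i) % N], so this dot product is the direct definition
direct = np.array([np.dot(a, np.roll(b, -i)) for i in range(len(a))])
assert np.allclose(fast, direct)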
def fft_convolve(X, Y, inv=0):
    XF = np.fft.rfft2(X)
    YF = np.fft.rfft2(Y)
    # YF0 = np.copy(YF)
    # YF.imag = 0
    # XF.imag = 0
    if inv == 1:
        # plt.imshow(np.real(YF)); plt.colorbar(); plt.show()
        # conjugation in the frequency domain turns convolution into correlation
        YF = np.conj(YF)
    SF = XF * YF
    S = np.fft.irfft2(SF)
    n1, n2 = np.shape(S)
    # integer division: np.roll requires an integer shift under Python 3
    S = np.roll(S, -n1 // 2 + 1, axis=0)
    S = np.roll(S, -n2 // 2 + 1, axis=1)
    return np.real(S)
def _invert(self):
    """
    Calculate the streamfunction given the potential vorticity.

    The algorithm is:
        1) Calculate the wave potential vorticity
        2) Invert for the wave streamfunction, pw, and the vortex
           streamfunction, pv
        3) Calculate the geostrophic streamfunction, p = pv + pw
    """
    # the wavy PV
    self.phich = self.fft(np.conj(self.phi))
    self.phi2 = np.abs(self.phi)**2
    self.jacph = self.jacobian_phic_phi()
    self.gphi2h = -self.wv2 * self.fft(self.phi2)
    self.qwh = (0.5 * self.gphi2h + 1j * self.jacph) / self.f / 2.
    self.qwh *= self.filtr

    # invert for psi
    self.ph = -self.wv2i * (self.qh - self.qwh)

    # calculate in physical space
    self.p = self.ifft(self.ph).real
def fst_delay_snd(fst, snd, samp_rate):
    # Verify argument shape.
    s1, s2 = fst.shape, snd.shape
    if len(s1) != 1 or len(s2) != 1 or s1[0] != s2[0]:
        raise Exception("Argument shape invalid, in 'fst_delay_snd' function")

    length = s1[0]
    half_len = int(length / 2)
    Xfst = numpy.fft.fft(fst)
    Xsnd_star = numpy.conj(numpy.fft.fft(snd))
    Xall = numpy.zeros(length, dtype=numpy.complex64)

    # Normalize each bin of the cross-spectrum to unit magnitude, keeping
    # only phase information (GCC-PHAT style weighting); bins where either
    # spectrum vanishes are zeroed to avoid division by zero.
    for i in range(0, length):
        if Xsnd_star[i] == 0 or Xfst[i] == 0:
            Xall[i] = 0
        else:
            Xall[i] = (Xsnd_star[i] * Xfst[i]) / abs(Xsnd_star[i]) / abs(Xfst[i])

    R = numpy.fft.ifft(Xall)
    max_pos = numpy.argmax(R)
    if max_pos > half_len:
        return -(length - 1 - max_pos) / samp_rate
    else:
        return max_pos / samp_rate
def fst_delay_snd(fst, snd, samp_rate):
    # Verify argument shape.
    s1, s2 = fst.shape, snd.shape
    if len(s1) != 1 or len(s2) != 1 or s1[0] != s2[0]:
        raise Exception("Argument shape invalid, in 'fst_delay_snd' function")

    length = s1[0]
    half_len = int(length / 2)
    Xfst = numpy.fft.fft(fst)
    Xsnd = numpy.fft.fft(snd)
    Xsnd_star = numpy.conj(Xsnd)
    Xall = numpy.zeros(length, dtype=numpy.complex64)

    for i in range(0, length):
        Xall[i] = (Xsnd_star[i] * Xfst[i]) / abs(Xsnd_star[i]) / abs(Xfst[i])

    R = numpy.fft.ifft(Xall)
    max_pos = numpy.argmax(R)
    if max_pos > half_len:
        delta = -(length - 1 - max_pos) / samp_rate
    else:
        delta = max_pos / samp_rate
    return delta, R, Xfst, Xsnd
def test_probabilities():
    p = 1
    n_qubits = 2
    # known set of angles for barbell
    angles = [1.96348709, 4.71241069]
    wf = np.array([-1.17642098e-05 - 1j*7.67538040e-06,
                   -7.67563580e-06 - 1j*7.07106781e-01,
                   -7.67563580e-06 - 1j*7.07106781e-01,
                   -1.17642098e-05 - 1j*7.67538040e-06])
    fakeQVM = Mock(spec=qvm_module.QVMConnection())
    fakeQVM.wavefunction = Mock(return_value=(Wavefunction(wf)))
    inst = QAOA(fakeQVM, n_qubits, steps=p, rand_seed=42)

    true_probs = np.zeros_like(wf)
    for xx in range(wf.shape[0]):
        true_probs[xx] = np.conj(wf[xx]) * wf[xx]

    probs = inst.probabilities(angles)
    assert isinstance(probs, np.ndarray)

    prob_true = np.zeros((2**inst.n_qubits, 1))
    prob_true[1] = 0.5
    prob_true[2] = 0.5
    assert np.isclose(probs, prob_true).all()
def probabilities(self, angles):
    """
    Computes the probability of each state given a particular set of angles.

    :param angles: [list] A concatenated list of angles [betas] + [gammas]
    :return: [list] The probabilities of each outcome given those angles.
    """
    if isinstance(angles, list):
        angles = np.array(angles)
    assert angles.shape[0] == 2 * self.steps, "angles must be 2 * steps"

    param_prog = self.get_parameterized_program()
    prog = param_prog(angles)
    wf = self.qvm.wavefunction(prog)
    wf = wf.amplitudes.reshape((-1, 1))
    probs = np.zeros_like(wf)
    for xx in range(2 ** self.n_qubits):
        probs[xx] = np.conj(wf[xx]) * wf[xx]
    return probs
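The Born-rule computation in the loop above — probability = conj(amplitude) * amplitude — can be seen in isolation with a plain array (a standalone sketch of my own; the wavefunction values are illustrative, not QAOA output):

import numpy as np

wf = np.array([0.5 + 0.0j, 0.0 + 0.5j, -0.5 + 0.0j, 0.0 - 0.5j]).reshape((-1, 1))
probs = np.conj(wf) * wf          # complex result with zero imaginary part
print(np.real(probs).ravel())     # [0.25 0.25 0.25 0.25]
assert np.isclose(np.real(probs).sum(), 1.0)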
def correlate_periodic(a, v=None):
    """Cross-correlation of two 1-dimensional periodic sequences.

    a and v must be sequences with the same length. If v is not specified, it
    is assumed to be the same as a (i.e. the function computes
    auto-correlation).

    :param a: input sequence #1
    :param v: input sequence #2
    :returns: discrete periodic cross-correlation of a and v
    """
    a_fft = _np.fft.fft(_np.asarray(a))
    if v is None:
        v_cfft = a_fft.conj()
    else:
        v_cfft = _np.fft.fft(_np.asarray(v)).conj()
    x = _np.fft.ifft(a_fft * v_cfft)
    if _np.isrealobj(a) and (v is None or _np.isrealobj(v)):
        x = x.real
    return x
def eval_gradf(self):
    """Compute gradient in Fourier domain."""

    # Compute X D - S
    self.Ryf = self.eval_Rf(self.Yf)

    # Map to spatial domain to multiply by mask
    Ry = sl.irfftn(self.Ryf, self.cri.Nv, self.cri.axisN)
    # Multiply by mask
    WRy = (self.W**2) * Ry
    # Map back to frequency domain
    WRyf = sl.rfftn(WRy, self.cri.Nv, self.cri.axisN)

    gradf = sl.inner(np.conj(self.Zf), WRyf, axis=self.cri.axisK)

    # Multiple channel signal, single channel dictionary
    if self.cri.C > 1 and self.cri.Cd == 1:
        gradf = np.sum(gradf, axis=self.cri.axisC, keepdims=True)

    return gradf
def setcoef(self, Z):
    """Set coefficient array."""

    # If the dictionary has a single channel but the input (and
    # therefore also the coefficient map array) has multiple
    # channels, the channel index and multiple image index have
    # the same behaviour in the dictionary update equation: the
    # simplest way to handle this is to just reshape so that the
    # channels also appear on the multiple image index.
    if self.cri.Cd == 1 and self.cri.C > 1:
        Z = Z.reshape(self.cri.Nv + (1,) + (self.cri.Cx*self.cri.K,) +
                      (self.cri.M,))
    self.Z = np.asarray(Z, dtype=self.dtype)

    self.Zf = sl.rfftn(self.Z, self.cri.Nv, self.cri.axisN)
    # Compute X^H S
    self.ZSf = sl.inner(np.conj(self.Zf), self.Sf, self.cri.axisK)
def setcoef(self, Z):
    """Set coefficient array."""

    # If the dictionary has a single channel but the input (and
    # therefore also the coefficient map array) has multiple
    # channels, the channel index and multiple image index have
    # the same behaviour in the dictionary update equation: the
    # simplest way to handle this is to just reshape so that the
    # channels also appear on the multiple image index.
    if self.cri.Cd == 1 and self.cri.C > 1:
        Z = Z.reshape(self.cri.Nv + (1,) + (self.cri.Cx*self.cri.K,) +
                      (self.cri.M,))
    self.Z = np.asarray(Z, dtype=self.dtype)

    self.Zf = sl.rfftn(self.Z, self.cri.Nv, self.cri.axisN)
    # Compute X^H S
    self.ZSf = np.conj(self.Zf) * self.Sf
def xstep(self):
    r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{x}`."""

    self.cgit = None

    self.YU[:] = self.Y - self.U
    self.block_sep0(self.YU)[:] += self.S
    YUf = sl.rfftn(self.YU, None, self.cri.axisN)
    b = sl.inner(np.conj(self.Zf), self.block_sep0(YUf),
                 axis=self.cri.axisK) + self.block_sep1(YUf)

    self.Xf[:], cgit = sl.solvemdbi_cg(
        self.Zf, 1.0, b, self.cri.axisM, self.cri.axisK,
        self.opt['CG', 'StopTol'], self.opt['CG', 'MaxIter'], self.Xf)
    self.cgit = cgit

    self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN)
    self.xstep_check(b)
def xstep(self):
    """The xstep of the baseline consensus class from which this class is
    derived is re-used to implement the xstep of the modified algorithm by
    replacing ``self.ZSf``, which is constant in the baseline algorithm,
    with a quantity derived from the additional variables ``self.Y1`` and
    ``self.U1``. It is also necessary to set the penalty parameter to
    unity for the duration of the x step.
    """

    self.YU1[:] = self.Y1 - self.U1
    self.ZSf = np.conj(self.Zf) * (self.Sf + sl.rfftn(
        self.YU1, None, self.cri.axisN))
    rho = self.rho
    self.rho = 1.0
    super(ConvCnstrMODMaskDcpl_Consensus, self).xstep()
    self.rho = rho
def xstep(self):
    r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{x}`."""

    b = self.AHSf + self.rho*np.sum(
        np.conj(self.Gf) * sl.rfftn(self.Y - self.U, axes=self.axes),
        axis=self.Y.ndim - 1)
    self.Xf = b / (self.AHAf + self.rho*self.GHGf)
    self.X = sl.irfftn(self.Xf, None, axes=self.axes)

    if self.opt['LinSolveCheck']:
        ax = (self.AHAf + self.rho*self.GHGf)*self.Xf
        self.xrrs = sl.rrs(ax, b)
    else:
        self.xrrs = None
def eigenbasis(se, nb):
    # generate number sector
    ns1 = se.model.numbersector(nb)

    # get the size of the basis
    ns1size = ns1.basis.len  # length of the number sector basis
    # G1i = range(ns1size)  # our Greens function?

    # self energy
    # sigma = self.sigma(nb, phi)

    # Effective Hamiltonian
    H1n = ns1.hamiltonian

    # Complete diagonalization
    E1, psi1r = linalg.eig(H1n.toarray(), left=False)
    # biorthogonal left eigenvectors: conjugate-transposed rows of the
    # inverse of the right eigenvector matrix
    psi1l = np.conj(np.linalg.inv(psi1r)).T
    # psi1l = np.conj(psi1r).T

    # check for dark states (throw a warning if one shows up)
    # if (nb > 0):
    #     Setup.check_for_dark_states(nb, E1)

    return E1, psi1l, psi1r
def _find_start_symbol(iq_data):
    '''Correlate to find symbol boundaries'''
    # SYMBOL_LENGTH, GUARD_LENGTH and USEFUL_LENGTH are module-level constants
    corr_length = 2 * SYMBOL_LENGTH
    corr = np.empty(corr_length)
    for k in range(corr_length):
        # correlate each candidate guard interval against the matching tail
        # of the useful part (cyclic-prefix style correlation)
        leading = iq_data[k:k + GUARD_LENGTH]
        trailing = iq_data[k + USEFUL_LENGTH:k + SYMBOL_LENGTH]
        corr[k] = np.abs(np.dot(leading, np.conj(trailing)))
    first_symbol = np.argmax(corr) % SYMBOL_LENGTH
    return first_symbol
def get_power_spectral_density_matrix(observation, mask=None):
    """
    Calculates the weighted power spectral density matrix.
    This does not yet work with more than one target mask.

    :param observation: Complex observations with shape (bins, sensors, frames)
    :param mask: Masks with shape (bins, frames) or (bins, 1, frames)
    :return: PSD matrix with shape (bins, sensors, sensors)
    """
    bins, sensors, frames = observation.shape

    if mask is None:
        mask = np.ones((bins, frames))

    if mask.ndim == 2:
        mask = mask[:, np.newaxis, :]

    normalization = np.maximum(np.sum(mask, axis=-1, keepdims=True), 1e-6)

    psd = np.einsum('...dt,...et->...de', mask * observation,
                    observation.conj())
    psd /= normalization
    return psd
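A usage sketch for the function above with synthetic observations (the shapes follow the docstring; the random inputs are my own, not project data): each per-bin PSD matrix is Hermitian by construction, since swapping the two sensor indices conjugates the einsum product.

import numpy as np

rng = np.random.default_rng(1)
bins, sensors, frames = 4, 3, 10
obs = (rng.standard_normal((bins, sensors, frames))
       + 1j * rng.standard_normal((bins, sensors, frames)))
psd = get_power_spectral_density_matrix(obs)   # shape (bins, sensors, sensors)
assert np.allclose(psd, np.conj(psd.swapaxes(-1, -2)))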
def get_mvdr_vector(atf_vector, noise_psd_matrix):
    """
    Returns the MVDR beamforming vector.

    :param atf_vector: Acoustic transfer function vector
        with shape (..., bins, sensors)
    :param noise_psd_matrix: Noise PSD matrix
        with shape (bins, sensors, sensors)
    :return: Set of beamforming vectors with shape (..., bins, sensors)
    """

    while atf_vector.ndim > noise_psd_matrix.ndim - 1:
        noise_psd_matrix = np.expand_dims(noise_psd_matrix, axis=0)

    # Make sure matrix is hermitian
    noise_psd_matrix = 0.5 * (
        noise_psd_matrix + np.conj(noise_psd_matrix.swapaxes(-1, -2)))

    numerator = solve(noise_psd_matrix, atf_vector)
    denominator = np.einsum('...d,...d->...', atf_vector.conj(), numerator)
    beamforming_vector = numerator / np.expand_dims(denominator, axis=-1)

    return beamforming_vector
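A check of the function above (my own sketch, assuming the function and its numpy.linalg.solve import are in scope; the synthetic inputs are illustrative): MVDR weights w = inv(R) d / (d^H inv(R) d) satisfy the distortionless constraint w^H d = 1 in every bin.

import numpy as np

rng = np.random.default_rng(2)
bins, sensors = 4, 3
A = (rng.standard_normal((bins, sensors, sensors))
     + 1j * rng.standard_normal((bins, sensors, sensors)))
# A A^H plus a diagonal loading term gives a Hermitian positive definite noise PSD
noise_psd = A @ np.conj(A.swapaxes(-1, -2)) + sensors * np.eye(sensors)
atf = rng.standard_normal((bins, sensors)) + 1j * rng.standard_normal((bins, sensors))
# NOTE: relies on the pre-NumPy-2.0 solve semantics the source project targeted,
# where b with one fewer dimension than a is treated as a stack of vectors.
w = get_mvdr_vector(atf, noise_psd)
assert np.allclose(np.einsum('...d,...d->...', np.conj(w), atf), 1.0)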
def apply_sdw_mwf(mix, target_psd_matrix, noise_psd_matrix, mu=1, corr=None):
    """
    Apply the speech distortion weighted MWF: h = Tpsd * e1 / (Tpsd + mu * Npsd)

    :param mix: the signal complex FFT with shape (bins, sensors, frames)
    :param target_psd_matrix: target PSD matrix with shape (bins, sensors, sensors)
    :param noise_psd_matrix: noise PSD matrix with shape (bins, sensors, sensors)
    :param mu: the Lagrange factor
    :param corr: per-channel correlation scores used to pick the reference channel
    :return: filtered output with shape (bins, frames)
    """
    bins, sensors, frames = mix.shape
    ref_vector = np.zeros((sensors, 1), dtype=float)
    if corr is None:
        ref_ch = 0
    else:
        # choose the channel with the highest correlation with the others
        corr = corr.tolist()
        while len(corr) > sensors:
            corr.remove(np.min(corr))
        ref_ch = np.argmax(corr)
    ref_vector[ref_ch, 0] = 1

    mwf_vector = solve(target_psd_matrix + mu * noise_psd_matrix,
                       target_psd_matrix[:, :, ref_ch])
    return np.einsum('...a,...at->...t', mwf_vector.conj(), mix)
def test_blas_cgemm(backend, m, n, k, alpha, beta, forward):
    b = backend()

    y = indigo.util.rand64c(m, n)
    M = indigo.util.rand64c(m, k)
    x = indigo.util.rand64c(k, n)

    if not forward:
        x, y = y, x
        M_exp = np.conj(M.T)  # the adjoint applies the conjugate transpose
    else:
        M_exp = M
    y_exp = alpha * M_exp.dot(x) + beta * y

    y_d = b.copy_array(y)
    M_d = b.copy_array(M)
    x_d = b.copy_array(x)

    b.cgemm(y_d, M_d, x_d, alpha, beta, forward=forward)
    y_act = y_d.to_host()
    np.testing.assert_allclose(y_exp, y_act, atol=1e-3)
def _lagged_coherence_1freq(x, f, Fs, N_cycles=3, f_step=1):
    """Calculate lagged coherence of x at frequency f using the
    Hanning-tapered FFT method."""
    Nsamp = int(np.ceil(N_cycles * Fs / f))

    # For each N-cycle chunk, calculate the Fourier coefficient at f
    chunks = _nonoverlapping_chunks(x, Nsamp)
    C = len(chunks)
    hann_window = signal.hanning(Nsamp)
    fourier_f = np.fft.fftfreq(Nsamp, 1 / float(Fs))
    fourier_f_idx = _arg_closest_value(fourier_f, f)
    fourier_coefsoi = np.zeros(C, dtype=complex)
    for i2, c in enumerate(chunks):
        fourier_coef = np.fft.fft(c * hann_window)
        fourier_coefsoi[i2] = fourier_coef[fourier_f_idx]

    # lagged coherence: normalized product of successive coefficients
    lcs_num = 0
    for i2 in range(C - 1):
        lcs_num += fourier_coefsoi[i2] * np.conj(fourier_coefsoi[i2 + 1])
    lcs_denom = np.sqrt(np.sum(np.abs(fourier_coefsoi[:-1])**2) *
                        np.sum(np.abs(fourier_coefsoi[1:])**2))
    return np.abs(lcs_num / lcs_denom)
def _create_rotational_weights_for_elements(self, kpoint, transformation_matrix,
                                            vectors):
    """
    Parameters
    ----------
    kpoint : 1d array
        Reciprocal space point in fractional coordinates for PC.
    vectors : (..., natoms_p * ndims, nbands) array
        Vectors for SC after translational projection.
    """
    projected_vectors = self._rotational_projector.project_vectors(
        vectors, kpoint, transformation_matrix)

    nirreps, natoms_p, nelms, tmp, nbands = projected_vectors.shape

    shape = (nirreps, natoms_p, nelms, natoms_p, nelms, nbands)
    weights = np.zeros(shape, dtype=complex)
    for i in range(nirreps):
        for j in range(nbands):
            weights[i, ..., j] = np.inner(
                np.conj(projected_vectors[i, ..., j]),
                projected_vectors[i, ..., j])

    return weights, projected_vectors
def autocorrelation(self):
    "Autocorrelation as a function of time"
    if self.__autocorrelation is not None:
        return self.__autocorrelationTimeSeries, self.__autocorrelation

    # mirror the time axis so the ACF is defined for negative times as well
    negT = -np.flipud(self.timeSeries[1:])
    autocorrelationTime = np.hstack((negT, self.timeSeries))
    self.__autocorrelationTimeSeries = autocorrelationTime

    initialWF = self[0]
    ACF = []
    for WF in self:
        ACF.append(WF.overlap(initialWF))
    ACF = np.array(ACF)

    # ACF(-t) = conj(ACF(t))
    negACF = np.conj(np.flipud(ACF[1:]))
    totalACF = np.hstack((negACF, ACF))
    self.__autocorrelation = totalACF

    return self.__autocorrelationTimeSeries, self.__autocorrelation
def swap_Nq(fft_y, fu, fft_x, N):
    f = fu[:, 0].copy()
    fft_x[0] = f[0].real
    fft_x[1:N//2] = 0.5 * (f[1:N//2] + np.conj(f[:N//2:-1]))
    fft_x[N//2] = f[N//2].real
    fu[:N//2+1, 0] = fft_x[:N//2+1]
    fu[N//2+1:, 0] = np.conj(fft_x[(N//2-1):0:-1])

    fft_y[0] = f[0].imag
    fft_y[1:N//2] = -0.5 * 1j * (f[1:N//2] - np.conj(f[:N//2:-1]))
    fft_y[N//2] = f[N//2].imag
    fft_y[N//2+1:] = np.conj(fft_y[(N//2-1):0:-1])
    return fft_y
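swap_Nq unpacks two real-input transforms that were computed as a single complex FFT; the trick rests on the Hermitian symmetry of real-signal DFTs, X[N-k] == conj(X[k]). A minimal demonstration of that symmetry (my addition):

import numpy as np

x = np.random.default_rng(3).standard_normal(8)
X = np.fft.fft(x)
# X[N-k] == conj(X[k]) for k = 1..N-1
assert np.allclose(X[1:][::-1], np.conj(X[1:]))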
def nufft_alpha_kb_fit(N, J, K):
    '''
    Find the parameters alpha and beta of the scaling factor st['sn'].
    Note: when J = 1, alpha is hardwired as [1, 0, 0, ...]
    (uniform scaling factor).
    '''
    beta = 1
    Nmid = (N - 1.0) / 2.0
    if N > 40:
        L = 13
    else:
        L = numpy.ceil(N / 3)

    nlist = numpy.arange(0, N) * 1.0 - Nmid
    (kb_a, kb_m) = kaiser_bessel('string', J, 'best', 0, K / N)
    if J > 1:
        sn_kaiser = 1 / kaiser_bessel_ft(nlist / K, J, kb_a, kb_m, 1.0)
    elif J == 1:  # the case when samples are on regular grids
        # dtype is defined at module level in the source project
        sn_kaiser = numpy.ones((1, N), dtype=dtype)
    gam = 2 * numpy.pi / K
    X_ant = beta * gam * nlist.reshape((N, 1), order='F')
    X_post = numpy.arange(0, L + 1)
    X_post = X_post.reshape((1, L + 1), order='F')
    X = numpy.dot(X_ant, X_post)  # [N, L]
    X = numpy.cos(X)
    sn_kaiser = sn_kaiser.reshape((N, 1), order='F').conj()
    X = numpy.array(X, dtype=dtype)
    sn_kaiser = numpy.array(sn_kaiser, dtype=dtype)
    coef = numpy.linalg.lstsq(numpy.nan_to_num(X),
                              numpy.nan_to_num(sn_kaiser))[0]  # (X \ sn_kaiser.H)

    alphas = coef
    if J > 1:
        alphas[0] = alphas[0]
        alphas[1:] = alphas[1:] / 2.0
    elif J == 1:  # cases on grids
        alphas[0] = 1.0
        alphas[1:] = 0.0
    alphas = numpy.real(alphas)
    return (alphas, beta)
def iterate_l1(L, alpha, arg, beta, K, N, rr):
    oversample_ratio = (1.0 * K / N)
    for l1 in range(-L, L + 1):
        alf = alpha[abs(l1)] * 1.0
        if l1 < 0:
            alf = numpy.conj(alf)
        # r1 = numpy.sinc(1.0*(arg + 1.0*l1*beta)/(1.0*K/N))
        input_array = (arg + 1.0 * l1 * beta) / oversample_ratio
        r1 = dirichlet(input_array.astype(numpy.float32))
        rr = iterate_sum(rr, alf, r1)
    return rr
def nufft_r(om, N, J, K, alpha, beta):
    '''
    Equation (30) of Fessler's paper
    '''

    def iterate_sum(rr, alf, r1):
        rr = rr + alf * r1
        return rr

    def iterate_l1(L, alpha, arg, beta, K, N, rr):
        oversample_ratio = (1.0 * K / N)
        import time
        t0 = time.time()
        for l1 in range(-L, L + 1):
            alf = alpha[abs(l1)] * 1.0
            # if l1 < 0:
            #     alf = numpy.conj(alf)
            # r1 = numpy.sinc(1.0*(arg + 1.0*l1*beta)/(1.0*K/N))
            input_array = (arg + 1.0 * l1 * beta) / oversample_ratio
            r1 = dirichlet(input_array)
            rr = iterate_sum(rr, alf, r1)
        return rr

    M = numpy.size(om)  # 1D size
    gam = 2.0 * numpy.pi / (K * 1.0)
    nufft_offset0 = nufft_offset(om, J, K)  # om/gam - nufft_offset, [M, 1]
    dk = 1.0 * om / gam - nufft_offset0     # om/gam - nufft_offset, [M, 1]
    arg = outer_sum(-numpy.arange(1, J + 1) * 1.0, dk)
    L = numpy.size(alpha) - 1
    # print('alpha', alpha)
    rr = numpy.zeros((J, M), dtype=numpy.float32)
    rr = iterate_l1(L, alpha, arg, beta, K, N, rr)
    return (rr, arg)
def series_nfft(series, oversample=4):
    """
    Note that the output period units are [days] (and frequency is [1/days]).
    """
    M = len(series)
    if not is_power_of_2(M):
        raise ValueError('series length {} is not a power of 2'.format(len(series)))
    N = M
    if N % 2 == 1:
        # the number of frequency samples must be even
        N += 1
    N *= oversample
    # re-grid time to the interval [-1/2, 1/2) as required by nfft
    time = series.index.astype(NP.int64) / 1e9
    time -= time[0]
    b = -0.5
    a = (M - 1) / (M * time[-1])
    x = a * time + b
    # setup for nfft computation
    plan = NFFT(N, M)
    plan.x = x
    plan.f = series.values
    plan.precompute()
    # compute nfft (note that conjugation is necessary because of the
    # difference in transform sign convention)
    x_nfft = NP.conj(plan.adjoint())
    # calculate frequencies and periods
    dt = ((series.index[-1] - series.index[0]) / M).total_seconds() / DAYS_TO_SECONDS
    f_range = NP.fft.fftshift(NP.fft.fftfreq(N, dt))
    T_range = 1 / f_range
    return x_nfft, f_range, T_range
def spatFT(data, position_grid, order_max=10, spherical_harmonic_bases=None):
    '''Spatial Fourier Transform

    Parameters
    ----------
    data : array_like
        Data to be transformed, with signals in rows and frequency bins in
        columns
    position_grid : array_like or io.SphericalGrid
        Azimuths/Colatitudes/Gridweights of spatial sampling points
    order_max : int, optional
        Maximum transform order (Default: 10)

    Returns
    -------
    Pnm : array_like
        Spatial Fourier Coefficients with nm coeffs in rows and FFT bins in
        columns
    '''
    data = _np.atleast_2d(data)
    number_of_signals, FFTLength = data.shape

    position_grid = SphericalGrid(*position_grid)

    # Re-generate spherical harmonic bases if they were not provided or
    # their order is too small
    if (spherical_harmonic_bases is None or
            spherical_harmonic_bases.shape[0] < number_of_signals or
            spherical_harmonic_bases.shape[1] < (order_max + 1) ** 2):
        spherical_harmonic_bases = sph_harm_all(
            order_max, position_grid.azimuth, position_grid.colatitude)

    spherical_harmonic_bases = (_np.conj(spherical_harmonic_bases).T *
                                (4 * _np.pi * position_grid.weight))

    return _np.inner(spherical_harmonic_bases, data.T)
def _random_state(sites, ldim, randstate=None):
    """Returns a random positive semidefinite operator of shape (ldim, ldim) *
    sites normalized to Tr rho = 1, i.e. a mixed state with local dimension
    `ldim` living on `sites` sites. Note that the returned state is positive
    semidefinite only when interpreted in global form (see
    :func:`tools.global_to_local`)

    :param sites: Number of local sites
    :param ldim: Local dimension
    :param randstate: numpy.random.RandomState instance or None
    :returns: numpy.ndarray of shape (ldim, ldim) * sites

    >>> from numpy.linalg import eigvalsh
    >>> rho = _random_state(3, 2).reshape((2**3, 2**3))
    >>> all(eigvalsh(rho) >= 0)
    True
    >>> np.abs(np.trace(rho) - 1) < 1e-6
    True
    """
    shape = (ldim**sites, ldim**sites)
    mat = _zrandn(shape, randstate=randstate)
    # A^H A is positive semidefinite for any A
    rho = np.conj(mat.T).dot(mat)
    rho /= np.trace(rho)
    return rho.reshape((ldim,) * 2 * sites)


####################################
#  Factory functions for MPArrays  #
####################################
def random_mpo(sites, ldim, rank, randstate=None, hermitian=False,
               normalized=True, force_rank=False):
    """Returns a Hermitian MPO with randomly chosen local tensors

    :param sites: Number of sites
    :param ldim: Local dimension
    :param rank: Rank
    :param randstate: numpy.random.RandomState instance or None
    :param hermitian: Is the operator supposed to be Hermitian
    :param normalized: Operator should have unit norm
    :param force_rank: If True, the rank is exactly `rank`. Otherwise, it
        might be reduced if we reach the maximum sensible rank.
    :returns: randomly chosen matrix product operator

    >>> mpo = random_mpo(4, 2, 10, force_rank=True)
    >>> mpo.ranks, mpo.shape
    ((10, 10, 10), ((2, 2), (2, 2), (2, 2), (2, 2)))
    >>> mpo.canonical_form
    (0, 4)
    """
    mpo = random_mpa(sites, (ldim,) * 2, rank, randstate=randstate,
                     force_rank=force_rank, dtype=np.complex_)

    if hermitian:
        # make the MPA Hermitian in place, without increasing the rank:
        ltens = (l + l.swapaxes(1, 2).conj() for l in mpo.lt)
        mpo = mp.MPArray(ltens)
    if normalized:
        # we do this with a copy to ensure the returned state is not
        # normalized
        mpo /= mp.norm(mpo.copy())

    return mpo
def random_mpdo(sites, ldim, rank, randstate=np.random):
    """Returns a randomly chosen matrix product density operator (i.e. a
    positive semidefinite matrix product operator with trace 1).

    :param sites: Number of sites
    :param ldim: Local dimension
    :param rank: Rank
    :param randstate: numpy.random.RandomState instance
    :returns: randomly chosen classically correlated matrix product density op.

    >>> rho = random_mpdo(4, 2, 4)
    >>> rho.ranks, rho.shape
    ((4, 4, 4), ((2, 2), (2, 2), (2, 2), (2, 2)))
    >>> rho.canonical_form
    (0, 4)
    """
    # generate the density matrix as a mixture of `rank` pure product states
    psis = [random_mps(sites, ldim, 1, randstate=randstate)
            for _ in range(rank)]
    weights = (lambda x: x / np.sum(x))(randstate.rand(rank))
    rho = mp.sumup(mpsmpo.mps_to_mpo(psi) * weight
                   for weight, psi in zip(weights, psis))

    # Scramble the local tensors
    for n, rank in enumerate(rho.ranks):
        unitary = _unitary_haar(rank, randstate)
        rho.lt[n] = matdot(rho.lt[n], unitary)
        rho.lt[n + 1] = matdot(np.transpose(unitary).conj(), rho.lt[n + 1])

    rho /= mp.trace(rho)
    return rho
def test_conjugations(nr_sites, local_dim, _, rgen, dtype):
    op = factory._random_op(nr_sites, local_dim, randstate=rgen, dtype=dtype)
    mpo = mp.MPArray.from_array(op, 2)
    assert_array_almost_equal(np.conj(op), mpo.conj().to_array())
    assert mpo.conj().dtype == dtype

    mpo.canonicalize()
    mpo_c = mpo.conj()
    assert_correct_normalization(mpo_c)
def test_norm(nr_sites, local_dim, rank, dtype, rgen):
    mp_psi = factory.random_mpa(nr_sites, local_dim, rank, randstate=rgen,
                                dtype=dtype)
    psi = mp_psi.to_array()

    assert_almost_equal(mp.inner(mp_psi, mp_psi), mp.norm(mp_psi)**2)
    assert_almost_equal(np.sum(psi.conj() * psi), mp.norm(mp_psi)**2)
def SLshearrecadjoint2D(X, shearletSystem):
    """
    Adjoint of the (pseudo-)inverse of 2D data. Note that this is also the
    (pseudo-)inverse of the adjoint.

    Usage:
        coeffs = SLshearrecadjoint2D(X, shearletSystem)

    Input:
        X             : 2D data.
        shearletSystem: Structure containing a shearlet system. This should
                        be the same system as the one previously used for
                        decomposition.

    Output:
        coeffs: X x Y x N array of shearlet coefficients.
    """
    # skipping useGPU stuff...

    Xfreq = np.divide(fftlib.fftshift(fftlib.fft2(fftlib.ifftshift(X))),
                      shearletSystem["dualFrameWeights"])
    coeffs = np.zeros(shearletSystem["shearlets"].shape, dtype=complex)
    for j in range(shearletSystem["nShearlets"]):
        coeffs[:, :, j] = fftlib.fftshift(fftlib.ifft2(fftlib.ifftshift(
            Xfreq * np.conj(shearletSystem["shearlets"][:, :, j]))))
    return np.real(coeffs).astype(X.dtype)

##############################################################################
def test_simple_conjugate(self):
    ref = np.conj(np.sqrt(complex(1, 1)))

    def f(z):
        return np.sqrt(np.conj(z))

    yield check_complex_value, f, 1, 1, ref.real, ref.imag, False

#def test_branch_cut(self):
#    _check_branch_cut(f, -1, 0, 1, -1)
def test_cabs_inf_nan(self):
    x, y = [], []

    # cabs(+-nan + nani) returns nan
    x.append(np.nan)
    y.append(np.nan)
    yield check_real_value, np.abs, np.nan, np.nan, np.nan

    x.append(np.nan)
    y.append(-np.nan)
    yield check_real_value, np.abs, -np.nan, np.nan, np.nan

    # According to the C99 standard, if exactly one of the real/imaginary
    # parts is inf and the other is nan, then cabs should return inf
    x.append(np.inf)
    y.append(np.nan)
    yield check_real_value, np.abs, np.inf, np.nan, np.inf

    x.append(-np.inf)
    y.append(np.nan)
    yield check_real_value, np.abs, -np.inf, np.nan, np.inf

    # cabs(conj(z)) == conj(cabs(z)) (= cabs(z))
    def f(a):
        return np.abs(np.conj(a))

    def g(a, b):
        return np.abs(complex(a, b))

    xa = np.array(x, dtype=complex)
    for i in range(len(xa)):
        ref = g(x[i], y[i])
        yield check_real_value, f, x[i], y[i], ref
def cor_fft(x1, x2, sigma):
    dist11 = np.sum(np.square(x1))
    dist22 = np.sum(np.square(x2))
    if len(x1.shape) == 2:
        c = np.fft.ifft2(np.conj(fft(x1)) * fft(x2))
    else:
        c = np.fft.ifft2(np.sum(np.conj(fft(x1)) * fft(x2), 2))
    dist = dist11 - 2 * c + dist22
    # Gaussian (RBF) correlation computed from the squared distance
    cor = np.exp(-1 * dist / (sigma**2 * x1.size))
    cor = np.real(cor)
    return cor