The following 36 code examples, extracted from open-source Python projects, illustrate how to use numpy.isreal().
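Before the project code, a minimal sketch of the function itself: numpy.isreal(x) tests elementwise whether the imaginary part of x is exactly zero, so it returns True for real numbers and for complex values such as 1+0j, but False for any nonzero imaginary part, however tiny. A pattern that recurs throughout the examples below is using it to keep only the real roots returned by numpy.roots (the polynomial here is illustrative, not from any of the projects):

import numpy as np

# Elementwise test: imaginary part must be exactly zero.
np.isreal([1.0, 1 + 0j, 1 + 1e-20j, 1j])
# -> array([ True,  True, False, False])

# Recurring pattern below: filter the real roots of a polynomial.
roots = np.roots([1, -6, 11, -6])          # x**3 - 6x**2 + 11x - 6 = (x-1)(x-2)(x-3)
real_roots = roots[np.isreal(roots)].real  # -> [3., 2., 1.] (order may vary)
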
def get_Z(self, T, p):
    #A = self.a * p / self.R**2 / T**2.5
    #B = self.b * p / self.R / T
    A = self.get_A(T, p)
    B = self.get_B(T, p)
    # Solve the cubic equation for compressibility factor z
    # Z^3 - Z^2 + (A-B-B**2)*Z - A*B = 0
    coeffs = [1, -1, A-B-B**2, -A*B]
    roots = np.roots(coeffs)
    real_roots = roots[np.isreal(roots)].real
    valid_roots = real_roots[real_roots > p*self.b/self.R/T]
    return valid_roots

# dZ/dT at const. p

def validate_gibbs_parameters(alpha1, alpha2, beta, restarts,
                              draws_per_restart, burnin, delay):
    '''Return `True` if params numerically acceptable. See `gibbs` for docs.'''
    real_vals = [alpha1, alpha2, beta]
    int_vals = [restarts, draws_per_restart, burnin, delay]
    # Check everything is real.
    if all(np.isreal(val) for val in real_vals + int_vals):
        # Check that integer values are some type of int.
        int_check = all(isinstance(val, (int, np.int32, np.int64))
                        for val in int_vals)
        # All integer values must be > 0.
        pos_int = all(val > 0 for val in int_vals)
        # All real values must be non-negative.
        non_neg = all(val >= 0 for val in real_vals)
        return int_check and pos_int and non_neg
    else:
        # Failed to be all numeric values.
        return False

def _set_widget_value(self, new_value,
                      transform_magnitude=lambda data:
                          20. * np.log10(np.abs(data) + sys.float_info.epsilon)):
    if new_value is None:
        return
    x, y = new_value
    shape = np.shape(y)
    if len(shape) > 2:
        raise ValueError("Data cannot be larger than 2 dimensional")
    if len(shape) == 1:
        y = [y]
    self._set_real(np.isreal(y).all())
    for i, values in enumerate(y):
        self._display_curve_index(x, values, i,
                                  transform_magnitude=transform_magnitude)
    while (i + 1 < len(self.curves)):  # delete remaining curves
        i += 1
        self.curves[i].hide()

def get_Z(self, T, p):
    kappa = 0.37464 + 1.54226*self.acentric - 0.26992*self.acentric**2
    Tr = T/self.T_crit
    alpha = (1 + kappa*(1 - Tr**0.5))**2
    A = alpha * self.a * p / self.R**2 / T**2
    B = self.b * p / self.R / T
    # Solve the cubic equation for compressibility factor z
    coeffs = [1, -(1-B), A-2*B-3*B**2, -(A*B-B**2-B**3)]
    #print(coeffs)
    roots = np.roots(coeffs)
    real_roots = roots[np.isreal(roots)].real
    valid_roots = real_roots[real_roots > p*self.b/self.R/T]
    return valid_roots

def fit_cubic(y0, y1, g0, g1):
    """Fit cubic polynomial to function values and derivatives at x = 0, 1.

    Returns position and function value of minimum if fit succeeds. Fit does
    not succeed if

    1. polynomial doesn't have extrema or
    2. maximum is in (0, 1) or
    3. maximum is closer to 0.5 than minimum.
    """
    a = 2*(y0-y1) + g0 + g1
    b = -3*(y0-y1) - 2*g0 - g1
    p = np.array([a, b, g0, y0])
    r = np.roots(np.polyder(p))
    if not np.isreal(r).all():
        return None, None
    r = sorted(x.real for x in r)
    if p[0] > 0:
        maxim, minim = r
    else:
        minim, maxim = r
    if 0 < maxim < 1 and abs(minim - 0.5) > abs(maxim - 0.5):
        return None, None
    return minim, np.polyval(p, minim)

def vals2coeffs2(vals):
    """Map function values at Chebyshev points of 2nd kind
    to first-kind Chebyshev polynomial coefficients"""
    n = vals.size
    if n <= 1:
        coeffs = vals
        return coeffs
    tmp = np.append(vals[::-1], vals[1:-1])
    if np.isreal(vals).all():
        coeffs = ifft(tmp)
        coeffs = np.real(coeffs)
    elif np.isreal(1j*vals).all():
        coeffs = ifft(np.imag(tmp))
        coeffs = 1j * np.real(coeffs)
    else:
        coeffs = ifft(tmp)
    coeffs = coeffs[:n]
    coeffs[1:n-1] = 2*coeffs[1:n-1]
    return coeffs

def coeffs2vals2(coeffs):
    """Map first-kind Chebyshev polynomial coefficients
    to function values at Chebyshev points of 2nd kind"""
    n = coeffs.size
    if n <= 1:
        vals = coeffs
        return vals
    coeffs = coeffs.copy()
    coeffs[1:n-1] = .5 * coeffs[1:n-1]
    tmp = np.append(coeffs, coeffs[n-2:0:-1])
    if np.isreal(coeffs).all():
        vals = fft(tmp)
        vals = np.real(vals)
    elif np.isreal(1j*coeffs).all():
        vals = fft(np.imag(tmp))
        vals = 1j * np.real(vals)
    else:
        vals = fft(tmp)
    vals = vals[n-1::-1]
    return vals

def _modify_dataframe(self, df): """Add row to dataframe, containing numbers aggregated with self.operator.""" if self.total_columns == []: columns = df.columns else: columns = self.total_columns if self.operator is not OP_NONE: last_row = self.operator(df[df.applymap(np.isreal)]) last_row = last_row.fillna(0.) last_row[~last_row.index.isin(columns)] = '' else: last_row = pd.Series('', index=df.columns) last_row.name = self.row_name # Appending kills index name, save now and restore after appending index_name = df.index.name df = df.append(last_row) df.index.name = index_name return df
def _incarify(value):
    """Format value of any compatible type into the string format
    appropriate for INCAR files"""
    result = None
    if isinstance(value, (str, unicode)):
        result = value
    elif not np.isscalar(value):
        value_array = np.array(value)
        shape = value_array.shape
        dim = len(shape)
        if dim == 1:
            result = ' '.join([_incarify(i) for i in value])
        elif dim == 2:
            result = '\n'.join([_incarify(i) for i in value])
        elif dim > 2:
            raise TypeError('you are trying to input a more than '
                            '2-dimensional array to VASP. '
                            'Not sure what to do...')
    elif isinstance(value, bool):
        result = '.True.' if value else '.False.'
    elif np.isreal(value):
        result = '{}'.format(value)
    return result

def __call__(self, x):
    if isinstance(x, Iterable):
        valid_idxs = (x > self._xmin - TINY_NUMBER) & (x < self._xmax + TINY_NUMBER)
        res = np.ones_like(x, dtype=float) * (BIG_NUMBER + self.peak_val)
        tmp_x = np.copy(x[valid_idxs])
        tmp_x[tmp_x < self._xmin + TINY_NUMBER] = self._xmin + TINY_NUMBER
        tmp_x[tmp_x > self._xmax - TINY_NUMBER] = self._xmax - TINY_NUMBER
        res[valid_idxs] = self._peak_val + self._func(tmp_x)
        return res
    elif np.isreal(x):
        if x < self._xmin or x > self._xmax:
            return BIG_NUMBER + self.peak_val
        # x is within interpolation range
        elif self._delta == True:
            return self._peak_val
        else:
            return self._peak_val + self._func(x)
    else:
        raise TypeError("Wrong type: should be float or array")

def getPosteriorMeanAndVar(self, diagKTestTest, KtrainTest, post, intercept=0):
    L = post['L']
    if (np.size(L) == 0):
        raise Exception('L is an empty array')  # possible to compute it here
    Lchol = np.all((np.all(np.tril(L, -1) == 0, axis=0) & (np.diag(L) > 0)) &
                   np.isreal(np.diag(L)))
    ns = diagKTestTest.shape[0]
    nperbatch = 5000
    nact = 0

    # allocate mem
    fmu = np.zeros(ns)  # column vector (of length ns) of predictive latent means
    fs2 = np.zeros(ns)  # column vector (of length ns) of predictive latent variances

    while (nact < (ns-1)):
        id = np.arange(nact, np.minimum(nact+nperbatch, ns))
        kss = diagKTestTest[id]
        Ks = KtrainTest[:, id]
        if (len(post['alpha'].shape) == 1):
            try:
                Fmu = intercept[id] + Ks.T.dot(post['alpha'])
            except:
                Fmu = intercept + Ks.T.dot(post['alpha'])
            fmu[id] = Fmu
        else:
            try:
                Fmu = intercept[id][:, np.newaxis] + Ks.T.dot(post['alpha'])
            except:
                Fmu = intercept + Ks.T.dot(post['alpha'])
            fmu[id] = Fmu.mean(axis=1)
        if Lchol:
            V = la.solve_triangular(L, Ks*np.tile(post['sW'], (id.shape[0], 1)).T,
                                    trans=1, check_finite=False, overwrite_b=True)
            fs2[id] = kss - np.sum(V**2, axis=0)  # predictive variances
        else:
            fs2[id] = kss + np.sum(Ks * (L.dot(Ks)), axis=0)  # predictive variances
        fs2[id] = np.maximum(fs2[id], 0)  # remove numerical noise, i.e. negative variances
        nact = id[-1]  # set counter to index of last processed data point
    return fmu, fs2

def discard_where_data_missing(data, field):
    """
    Discard subjects where data for a particular field is missing.

    Assumes the missing data value is NaN. Non-numeric values are never
    considered missing, even the empty string.
    """
    keep_indices = []
    for i, value in enumerate(data.subject_data[field].values):
        if not (np.isreal(value) and np.isnan(value)):
            keep_indices.append(i)
    return select_subjects(data, keep_indices)

def _set_widget_value(self, new_value):
    x, y, name = self.get_xy_data(new_value)
    if x is not None:
        if not np.isreal(y).all():
            self.curve.setData(x, self._magnitude(y))
            self.curve_phase.setData(x, self._phase(y))
            self.plot_item_phase.show()
            self.plot_item.setTitle(name + " - Magnitude (dB)")
        else:
            self.curve.setData(x, np.real(y))
            self.plot_item_phase.hide()
            self.plot_item.setTitle(name)

def get_Z(self, T, p): """ Returns the compressibility factor of a real gas. :param T: temperature (K) :type T: float :param p: pressure (Pa) :type p: float :return: compressibility factor :rtype: float """ # a = self.get_a(T) # coeffs = [ # 1, # (-self.R*T - self.b*p + self.c*p + self.d*p)/(self.R*T), # (-self.R*T*self.d*p + a*p - self.b*self.d*p**2 + self.c*self.d*p**2 + self.e*p**2)/(self.R**2*T**2), # (-self.R*T*self.e*p**2 - a*self.b*p**2 + a*self.c*p**2 - self.b*self.e*p**3 + self.c*self.e*p**3)/(self.R**3*T**3) # ] coeffs = [1, self.get_A(T, p), self.get_B(T, p), self.get_C(T, p)] #print(coeffs) roots = np.roots(coeffs) real_roots = roots[np.isreal(roots)].real if len(real_roots) == 1: real_roots = real_roots[0] return real_roots # Partial derivative of Z wrt T at constant p
def get_Z(self, T, p):
    A = self.get_A(T, p)
    B = self.get_B(T, p)
    # Solve the cubic equation for compressibility factor z
    coeffs = [1, -1, A-B-B**2, -A*B]
    roots = np.roots(coeffs)
    real_roots = roots[np.isreal(roots)].real
    valid_roots = real_roots[real_roots > p*self.b/self.R/T]
    return valid_roots

def test_lnprob_calculates_probability_for_success(datasets_db):
    """lnprob() successfully calculates the probability for equilibrium"""
    datasets_db.insert(zpf_json)
    res = lnprob([10], comps=['CU', 'MG', 'VA'], dbf=dbf,
                 phases=['LIQUID', 'FCC_A1', 'HCP_A3', 'LAVES_C15', 'CUMG2'],
                 datasets=datasets_db, symbols_to_fit=['VV0001'],
                 phase_models=None, scheduler=None)
    assert np.isreal(res)
    assert np.isclose(res, -5740.542839073727)

def fit_quartic(y0, y1, g0, g1):
    """Fit constrained quartic polynomial to function values and
    derivatives at x = 0, 1.

    Returns position and function value of minimum, or None if the fit
    fails or has a maximum. The quartic polynomial is constrained such
    that its second derivative is zero at exactly one point, which
    ensures that it has just one local extremum. Either no such quartic
    polynomial exists or there are two; of the two, the one with the
    lower minimum is chosen.
    """
    def g(y0, y1, g0, g1, c):
        a = c + 3*(y0-y1) + 2*g0 + g1
        b = -2*c - 4*(y0-y1) - 3*g0 - g1
        return np.array([a, b, c, g0, y0])

    def quart_min(p):
        r = np.roots(np.polyder(p))
        is_real = np.isreal(r)
        if is_real.sum() == 1:
            minim = r[is_real][0].real
        else:
            minim = r[(r == max(-abs(r))) | (r == -max(-abs(r)))][0].real
        return minim, np.polyval(p, minim)

    # discriminant of d^2y/dx^2 = 0
    D = -(g0+g1)**2 - 2*g0*g1 + 6*(y1-y0)*(g0+g1) - 6*(y1-y0)**2
    if D < 1e-11:
        return None, None
    else:
        m = -5*g0 - g1 - 6*y0 + 6*y1
        p1 = g(y0, y1, g0, g1, .5*(m + np.sqrt(2*D)))
        p2 = g(y0, y1, g0, g1, .5*(m - np.sqrt(2*D)))
        if p1[0] < 0 and p2[0] < 0:
            return None, None
        [minim1, minval1] = quart_min(p1)
        [minim2, minval2] = quart_min(p2)
        if minval1 < minval2:
            return minim1, minval1
        else:
            return minim2, minval2

def delayseq(x, delay_sec: float, fs: int):
    """
    x: input 1-D signal
    delay_sec: amount to shift signal [seconds]
    fs: sampling frequency [Hz]

    xs: time-shifted signal
    """
    assert x.ndim == 1, 'only 1-D signals for now'

    delay_samples = delay_sec * fs
    delay_int = round(delay_samples)

    nfft = nextpow2(x.size + delay_int)

    fbins = 2 * pi * ifftshift((arange(nfft) - nfft//2)) / nfft

    X = fft(x, nfft)
    Xs = ifft(X * exp(-1j * delay_samples * fbins))

    if isreal(x[0]):
        Xs = Xs.real

    xs = zeros_like(x)
    xs[delay_int:] = Xs[delay_int:x.size]

    return xs

def coeffmult(fc, gc):
    """Coefficient-space multiplication of equal-length
    first-kind Chebyshev series."""
    Fc = np.append(2.*fc[:1], (fc[1:], fc[:0:-1]))
    Gc = np.append(2.*gc[:1], (gc[1:], gc[:0:-1]))
    ak = ifft(fft(Fc) * fft(Gc))
    ak = np.append(ak[:1], ak[1:] + ak[:0:-1]) * .25
    ak = ak[:fc.size]
    inputcfs = np.append(fc, gc)
    out = np.real(ak) if np.isreal(inputcfs).all() else ak
    return out

def get_omega2(self, step=None):
    if isreal(self.omega2):
        return self.omega2
    return self.__get('omega2', step)

def discretize(self, nb_steps):
    """
    Discretize the remaining preview with a uniform timestep.

    Parameters
    ----------
    nb_steps : integer
        Number of discretization time steps.

    Returns
    -------
    X : array, shape=(N + 1, 9)
        Series of discretized states (com, comd, zmp).
    contacts : list of Contacts
        List of N + 1 contacts corresponding to each step k from 0 to N.
    """
    assert isreal(self.omega2), "Discretization only works on FIP previews"
    X = []
    contacts = []
    input_step = self.cur_step
    com, comd = self.P[0], self.V[0]
    output_timestep = self.rem_duration / nb_steps
    rem_dT = self.rem_dT
    omega2 = self.omega2
    for _ in xrange(nb_steps):
        X.append(hstack([com, comd, self.get_zmp(input_step)]))
        contacts.append(self.get_contact(input_step))
        time_to_fill = output_timestep
        while time_to_fill > 1e-10:
            if rem_dT < 1e-10:
                input_step += 1
                rem_dT = self.get_dT(input_step)
            zmp = self.get_zmp(input_step)
            dt = min(time_to_fill, rem_dT)
            com, comd = integrate_fip(com, comd, zmp, dt, omega2)
            time_to_fill -= dt
            rem_dT -= dt
    cp = com + comd / sqrt(omega2) + gravity / omega2
    X.append(hstack([com, comd, cp]))
    contacts.append(contacts[-1])
    return array(X), contacts

def _compute_minimum(self, a1, p1, dp1, a2, p2, dp2):
    cubic_mtx = np.zeros((4, 4))
    cubic_mtx[0, :] = [1., a1, a1 ** 2, a1 ** 3]
    cubic_mtx[1, :] = [1., a2, a2 ** 2, a2 ** 3]
    cubic_mtx[2, :] = [0., 1., 2 * a1, 3 * a1 ** 2]
    cubic_mtx[3, :] = [0., 1., 2 * a2, 3 * a2 ** 2]
    c0, c1, c2, c3 = np.linalg.solve(cubic_mtx, [p1, p2, dp1, dp2])

    d0 = c1
    d1 = 2 * c2
    d2 = 3 * c3
    r1, r2 = np.roots([d2, d1, d0])

    a = None
    p = max(p1, p2)
    dp = None  # stays None if neither root yields an improvement

    if (a1 <= r1 <= a2 or a2 <= r1 <= a1) and np.isreal(r1):
        px = self._phi(r1)
        if px < p:
            a = r1
            p = px
            dp = self._dphi(r1)
    if (a1 <= r2 <= a2 or a2 <= r2 <= a1) and np.isreal(r2):
        px = self._phi(r2)
        if px < p:
            a = r2
            p = px
            dp = self._dphi(r2)

    return a, p, dp

def is_psd(m):
    eigvals = linalg.eigvals(m)
    return np.isreal(eigvals).all() and (eigvals >= 0).all()

def _modify_dataframe(self, df): """Add row to dataframe, containing numbers aggregated with self.operator.""" if self.total_rows == []: rows = df.index.tolist() else: rows = self.total_rows if self.operator is not OP_NONE: new_column = self.operator(df[df.applymap(np.isreal)], axis=1) new_column = new_column.fillna(0.) new_column[~new_column.index.isin(rows)] = '' else: new_column = pd.Series('', index=df.index) df_mod = df.copy() df_mod[self.column_name] = new_column return df_mod
def _modify_dataframe(self, df): """Create single index dataframe inserting grouping rows for higher levels.""" if self.total_columns == []: columns = df.columns else: columns = self.total_columns flat_row_list = [] n_ix_levels = len(df.index.levels) # For each row compare index tuple to previous one and see if it changed on any level. previous_tuple = [''] * n_ix_levels for level_k, index_tuple in enumerate(df.index): for level_i, sub_index in enumerate(index_tuple): if index_tuple[:level_i + 1] != previous_tuple[:level_i + 1]: if level_i == n_ix_levels - 1: # If we are on lowest level, add entire row to flat_df data_rows = df.iloc[[level_k], :] else: # If we are on higher level, add row filled with operator on lower level data if self.operator is OP_NONE: # For operator None fill row with empty string for each column data_rows = pd.DataFrame('', columns=df.columns, index=[sub_index]) else: df_subset = df.loc[index_tuple[:level_i + 1]] data_rows = self.operator(df_subset[df_subset.applymap(np.isreal)]).to_frame().T data_rows = data_rows.fillna(0.) data_rows.loc[:, ~data_rows.columns.isin(columns)] = '' n_rows = len(data_rows) data_rows.index = [sub_index] * n_rows data_rows.loc[:, ORG_ROW_NAMES] = pd.Series([index_tuple[:level_i + 1]], index=data_rows.index) flat_row_list.append(data_rows) # Need to address index_level with i instead of sub_index, because sub_index can repeat many times. self.index_level += [level_i] * n_rows previous_tuple = index_tuple flat_df = pd.concat(flat_row_list) flat_df.index.name = '' return flat_df
def test_it(self):
    self.assertTrue(np.isreal(ef.return_real_part(1)))
    self.assertTrue(np.isreal(ef.return_real_part(1 + 0j)))
    self.assertTrue(np.isreal(ef.return_real_part(1 + 1e-20j)))
    self.assertRaises(TypeError, ef.return_real_part, None)
    self.assertRaises(TypeError, ef.return_real_part, (1, 2., 2 + 2j))
    self.assertRaises(TypeError, ef.return_real_part, [None, 2., 2 + 2j])
    self.assertRaises(ValueError, ef.return_real_part, [1, 2., 2 + 2j])
    self.assertRaises(ValueError, ef.return_real_part, 1 + 1e-10j)
    self.assertRaises(ValueError, ef.return_real_part, 1j)

def is_empty(self):
    if len(self.data) == 0:
        return True
    for v in self.data.values():
        if is_sequence(v):
            if len(v) > 0:
                return False
        else:
            if bool(np.isreal(v)):
                return False
    return True

def impute(col):
    if col.apply(numpy.isreal).all(axis=0):
        value = numpy.nanmedian(col)
    else:
        value = col.mode().iloc[0]
    return col.fillna(value)

def fmt_numerical(self, data):
    """
    There are two categories of data Porn Sieve gathers:

    1. Tag data, represented as a binary, mostly zero array of numbers
    2. Data which is continuous, such as duration, average review, etc.

    For the tags, I can just use CountVectorizer out-of-the-box, but for
    the other data, we need to put it all together in a list on our own.
    """
    nums = []
    # sorted to ensure the data is always in the same order
    for k in sorted(data.keys()):
        if k in ["feedback", "img"]:
            pass
        elif type(data[k]) == list:
            pass
        elif data[k] is None:
            nums.append(0)
        elif (k == "scrape_date") and (type(data[k]) != float):
            stamp = datetime.strptime(data[k], "%Y-%m-%d %H:%M:%S.%f")
            epoch = datetime.utcfromtimestamp(0)
            nums.append((stamp - epoch).total_seconds())
        elif np.isreal(data[k]):
            nums.append(data[k])
    return nums

def check_finite_real(M):
    '''
    Check that all entries in array M are finite and real-valued
    '''
    if np.any(~np.isreal(M)):
        raise ValueError("Complex value encountered for real vector")
    if np.any(~np.isfinite(M)):
        raise ValueError("Non-finite number encountered")

# need a faster covariance matrix checker

def check_covmat_fast(C, N=None, eps=1e-6):
    '''
    Verify that matrix C is a size NxN precision or covariance matrix
    '''
    if not type(C) == np.ndarray:
        raise ValueError("Covariance matrix should be a 2D numpy array")
    if not len(C.shape) == 2:
        raise ValueError("Covariance matrix should be a 2D numpy array")
    if N is None:
        N = C.shape[0]
    if not C.shape == (N, N):
        raise ValueError("Expected size %d x %d matrix" % (N, N))
    if np.any(~np.isreal(C)):
        raise ValueError("Covariance matrices should not contain complex numbers")
    C = np.real(C)
    if np.any(~np.isfinite(C)):
        raise ValueError("Covariance matrix contains NaN or ±inf!")
    if not np.all(np.abs(C - C.T) < eps):
        raise ValueError("Covariance matrix is not symmetric up to precision %0.1e" % eps)
    try:
        ch = chol(C)
    except numpy.linalg.linalg.LinAlgError:
        # Check smallest eigenvalue if cholesky fails
        mine = np.real(scipy.linalg.decomp.eigh(C, eigvals=(0, 0))[0][0])
        if np.any(mine < -eps):
            raise ValueError('Covariance matrix contains eigenvalue %0.3e<%0.3e' % (mine, -eps))
        if (mine < eps):
            C = C + np.eye(N) * (eps - mine)
    C = 0.5 * (C + C.T)
    return C

def _strekklikevekt(self, L, G_sno, T, H_0=None):
    """Finds the cable tension under given conditions for a fixed-tensioned line.

    The calculations are based on the following equilibrium equation:

    :math:`H_x^2 [H_x - H_0 + \\frac{EA(G_0 L)^2}{24H_0^2} + EA\\alpha \\Delta_T] = \\frac{EA(G_x L)^2}{24}`

    The cable equilibrium equation is taken from the «KL bible»
    («Contact Lines for Electric Railways» by Kiessling, Puschmann etc.),
    equation (5.57), page 282. The solution is found as the real, positive
    eigenvalue of the companion matrix of the residual function's coefficients.

    :param float H_0: Initial tension in the cable :math:`[N]`
    :param float E: Young's modulus of the cable :math:`[\\frac{N}{mm^2}]`
    :param float A: Cross-sectional area of the cable :math:`[mm^2]`
    :param float G_0: Self-weight of the cable :math:`[\\frac{N}{m}]`
    :param float G_sno: Self-weight of snow load :math:`[\\frac{N}{m}]`
    :param float L: Span length :math:`[m]`
    :param float alpha: Coefficient of thermal expansion :math:`[\\frac{1}{^{\\circ}C}]`
    :param float T: Air temperature :math:`[^{\\circ}C]`

    :return: Final cable tension ``H_x`` :math:`[N]`
    :rtype: :class:`float`
    """
    # Input parameters
    if H_0 is not None:
        H_0 = H_0
    else:
        H_0 = self.temperaturdata["5C"]["s"]
    E = self.E
    A = self.A
    G_0 = self.G_0
    alpha = self.alpha
    G_x = G_0 + G_sno
    delta_T = T - 5

    # Constants
    a = E * A * (G_x * L) ** 2 / 24
    b = - H_0 + E * A * (G_0 * L) ** 2 / (24 * H_0 ** 2) + E * A * alpha * delta_T

    roots = numpy.roots([-1, -b, 0, a])
    H_x = 0
    for r in roots:
        if numpy.isreal(r) and r > 0:
            H_x = numpy.real(r)
            break
    return H_x

def _make_widget(self):
    self.widget = pg.GraphicsWindow(title="Curve")
    self.plot_item = self.widget.addPlot(title="Curve")
    self.plot_item_phase = self.widget.addPlot(row=1, col=0, title="Phase (deg)")
    self.plot_item_phase.setXLink(self.plot_item)
    self.plot_item.showGrid(y=True, alpha=1.)
    self.plot_item_phase.showGrid(y=True, alpha=1.)
    self.curve = self.plot_item.plot(pen='g')
    self.curve_phase = self.plot_item_phase.plot(pen='g')
    self._is_real = True
    self._set_real(True)

#def _set_widget_value(self, new_value):
#    data = new_value
#    if data is None:
#        return
#    shape = np.shape(new_value)
#    if len(shape) > 2:
#        raise ValueError("Shape of data should be (1) or (2, 1)")
#    if len(shape) == 1:
#        x = np.linspace(0, len(data), len(data))
#        y = [data]
#    if len(shape) == 2:
#        if shape[0] == 1:
#            x = np.linspace(0, len(data), len(data[0]))
#            y = [data[0]]
#        if shape[0] >= 2:
#            x = data[0]
#            y = data[1:]
#    self._set_real(np.isreal(y).all())
#    for i, values in enumerate(y):
#        self._display_curve_index(x, values, i)
#    while (i + 1 < len(self.curves)):  # delete remaining curves
#        i += 1
#        self.curves[i].hide()

#def _display_curve_index(self, x, values, i):
#    y_mag = values if self._is_real else self._magnitude(values)
#    y_phase = np.zeros(len(values)) if self._is_real else \
#        self._phase(values)
#    if len(self.curves) <= i:
#        color = self._defaultcolors[i % len(self._defaultcolors)]
#        self.curves.append(self.plot_item.plot(pen=color))
#        self.curves_phase.append(self.plot_item_phase.plot(pen=color))
#    self.curves[i].setData(x, y_mag)
#    self.curves_phase[i].setData(x, y_phase)

def __init__(self, value, shape=None, dtype=None):
    """
    Create a new lazy array.

    `value` : may be an int, long, float, bool, NumPy array, iterator,
    generator or a function, `f(i)` or `f(i,j)`, depending on the
    dimensions of the array.

    `f(i,j)` should return a single number when `i` and `j` are integers,
    and a 1D array when either `i` or `j` or both is a NumPy array (in the
    latter case the two arrays must have equal lengths).
    """
    self.dtype = dtype
    self.operations = []
    if isinstance(value, basestring):
        raise TypeError("An larray cannot be created from a string")
    elif isinstance(value, larray):
        if shape is not None and value.shape is not None:
            assert shape == value.shape
        self._shape = shape or value.shape
        self.base_value = value.base_value
        self.dtype = dtype or value.dtype
        self.operations = value.operations  # should deepcopy?
    elif isinstance(value, collections.Sized):  # False for numbers, generators, functions, iterators
        if have_scipy and sparse.issparse(value):  # For sparse matrices
            self.dtype = dtype or value.dtype
        elif not isinstance(value, numpy.ndarray):
            value = numpy.array(value, dtype=dtype)
        elif dtype is not None:
            assert numpy.can_cast(value.dtype, dtype, casting='safe')  # or could convert value to the provided dtype
        if shape and value.shape != shape:
            raise ValueError("Array has shape %s, value has shape %s" % (shape, value.shape))
        self._shape = value.shape
        self.base_value = value
    else:
        assert numpy.isreal(value)  # also True for callables, generators, iterators
        self._shape = shape
        if dtype is None or isinstance(value, dtype):
            self.base_value = value
        else:
            try:
                self.base_value = dtype(value)
            except TypeError:
                self.base_value = value

def get_PGM_from_numpy_arr(arr, window_center, window_width,
                           lut_min=0, lut_max=255):
    '''real-valued numpy input -> PGM-image formatted byte string

    arr: real-valued numpy array to display as grayscale image
    window_center, window_width: to define max/min values to be mapped to the
                                 lookup-table range. WC/WW scaling is done
                                 according to DICOM-3 specifications.
    lut_min, lut_max: min/max values of (PGM-) grayscale table: do not change
    '''
    if np.isreal(arr).sum() != arr.size:
        raise ValueError

    # currently only support 8-bit colors
    if lut_max != 255:
        raise ValueError

    if arr.dtype != np.float64:
        arr = arr.astype(np.float64)

    # LUT-specific array scaling
    # width >= 1 (DICOM standard)
    window_width = max(1, window_width)

    wc, ww = np.float64(window_center), np.float64(window_width)
    lut_range = np.float64(lut_max) - lut_min

    minval = wc - 0.5 - (ww - 1.0) / 2.0
    maxval = wc - 0.5 + (ww - 1.0) / 2.0

    min_mask = (minval >= arr)
    to_scale = (arr > minval) & (arr < maxval)
    max_mask = (arr >= maxval)

    if min_mask.any():
        arr[min_mask] = lut_min
    if to_scale.any():
        arr[to_scale] = ((arr[to_scale] - (wc - 0.5)) /
                         (ww - 1.0) + 0.5) * lut_range + lut_min
    if max_mask.any():
        arr[max_mask] = lut_max

    # round to next integer values and convert to unsigned int
    arr = np.rint(arr).astype(np.uint8)

    # return PGM byte-data string
    return get_PGM_bytedata_string(arr)

def check_covmat(C, N=None, eps=1e-6):
    '''
    Verify that matrix C is a size NxN precision or covariance matrix
    '''
    if not type(C) == np.ndarray:
        raise ValueError("Covariance matrix should be a 2D numpy array")
    if not len(C.shape) == 2:
        raise ValueError("Covariance matrix should be a 2D numpy array")
    if N is None:
        N = C.shape[0]
    if not C.shape == (N, N):
        raise ValueError("Expected size %d x %d matrix" % (N, N))
    if np.any(~np.isreal(C)):
        raise ValueError("Covariance matrices should not contain complex numbers")
    C = np.real(C)
    if np.any(~np.isfinite(C)):
        raise ValueError("Covariance matrix contains NaN or ±inf!")
    if not np.all(np.abs(C - C.T) < eps):
        raise ValueError("Covariance matrix is not symmetric up to precision %0.1e" % eps)
    # Get just highest eigenvalue
    maxe = np.real(scipy.linalg.decomp.eigh(C, eigvals=(N-1, N-1))[0][0])
    # Get just lowest eigenvalue
    mine = np.real(scipy.linalg.decomp.eigh(C, eigvals=(0, 0))[0][0])
    #if np.any(w<-eps):
    #    raise ValueError('Covariance matrix contains eigenvalue %0.3e<%0.3e'%(np.min(w),-eps))
    if mine < 0:
        raise ValueError('Covariance matrix contains negative eigenvalue %0.3e' % mine)
    if (mine < eps):
        C = C + np.eye(N) * (eps - mine)
    # truncate spectrum at some small value
    # w[w<eps] = eps
    # Very large eigenvalues can also cause numeric problems
    # w[w>1./eps] = 1./eps
    # maxe = np.max(np.abs(w))
    # if maxe > 10./eps:
    #     raise ValueError('Covariance matrix eigenvalues %0.2e is larger than %0.2e'%(maxe,10./eps))
    # Rebuild matrix
    # C = v.dot(np.diag(w)).dot(v.T)
    # Ensure symmetry (only occurs as a numerical error for very large matrices?)
    C = 0.5 * (C + C.T)
    return C

# need a faster covariance matrix checker