The following code examples, extracted from open-source Python projects, illustrate how to use numpy.logspace().
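For reference, numpy.logspace(start, stop, num=50, endpoint=True, base=10.0) returns num samples spaced evenly on a logarithmic scale between base**start and base**stop, so a call like np.logspace(-5, 5) with no num argument (as in several examples below) yields 50 points. A minimal sketch of the basic call, assuming the default base of 10:

import numpy as np

# 5 samples from 10**0 to 10**3, evenly spaced in log10
print(np.logspace(0, 3, num=5))
# [   1.            5.62341325   31.6227766   177.827941   1000.        ]

# base=2 yields powers of two; the endpoint is included by default
print(np.logspace(0, 3, num=4, base=2))
# [1. 2. 4. 8.]
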
def parameterChoosing(self):
    # Set the parameters by cross-validation
    tuned_parameters = [{'penalty': ['l1'], 'C': np.logspace(-5,5)},
                        {'penalty': ['l2'], 'C': np.logspace(-5,5)}]

    clf = GridSearchCV(linear_model.LogisticRegression(tol=1e-6),
                       tuned_parameters, cv=5, scoring='precision_weighted')
    clf.fit(self.X_train, self.y_train.ravel())

    print("Best parameters set found on development set:\n")
    print(clf.best_params_)
    print("Grid scores on development set:\n")
    for params, mean_score, scores in clf.grid_scores_:
        print("%0.3f (+/-%0.03f) for %r\n" % (mean_score, scores.std() * 2, params))

    print("Detailed classification report:\n")
    y_true, y_pred = self.y_test, clf.predict(self.X_test)
    print(classification_report(y_true, y_pred))

def calc_IndCurrent_FD_spectrum(self):
    """Gives FD induced current spectrum"""
    # INITIALIZE ATTRIBUTES
    Bpx = self.Bpx
    Bpz = self.Bpz
    a2 = self.a2
    azm = np.pi*self.azm/180.
    R = self.R
    L = self.L

    w = 2*np.pi*np.logspace(0,8,101)

    Ax = np.pi*a2**2*np.sin(azm)
    Az = np.pi*a2**2*np.cos(azm)

    Phi = (Ax*Bpx + Az*Bpz)
    EMF = -1j*w*Phi
    Is = EMF/(R + 1j*w*L)

    return EMF, Is

def WaveVelandSkindWidget(epsr, sigma):
    frequency = np.logspace(1, 9, 61)
    vel, skind = WaveVelSkind(frequency, epsr, 10**sigma)
    figure, ax = plt.subplots(1, 2, figsize=(10, 4))
    ax[0].loglog(frequency, vel, 'b', lw=3)
    ax[1].loglog(frequency, skind, 'r', lw=3)
    ax[0].set_ylim(1e6, 1e9)
    ax[1].set_ylim(1e-1, 1e7)
    ax[0].set_xlabel('Frequency (Hz)')
    ax[0].set_ylabel('Velocity (m/s)')
    ax[1].set_xlabel('Frequency (Hz)')
    ax[1].set_ylabel('Skin Depth (m)')
    ax[0].grid(True)
    ax[1].grid(True)
    plt.show()
    return

def process(self, **kwargs):
    """Process module."""
    self._rest_times = kwargs['rest_times']
    self._rest_t_explosion = kwargs[self.key('resttexplosion')]

    outputs = OrderedDict()
    max_times = max(self._rest_times)
    if max_times > self._rest_t_explosion:
        outputs['dense_times'] = np.unique(
            np.concatenate(([0.0], [
                x + self._rest_t_explosion for x in np.logspace(
                    self.L_T_MIN,
                    np.log10(max_times - self._rest_t_explosion),
                    num=self._n_times)
            ], self._rest_times)))
    else:
        outputs['dense_times'] = np.array(self._rest_times)
    outputs['dense_indices'] = np.searchsorted(
        outputs['dense_times'], self._rest_times)
    return outputs

def main(table, schema):
    logger = get_root_logger()
    _ = get_header(logger, 'Building a model to predict Trump tweets')

    loc = get_path(__file__) + '/{0}'

    params = {
        'features__text_processing__vect__ngram_range': [(1, 1), (1, 2), (1, 3)],
        'clf__n_estimators': [int(x) for x in logspace(1, 3, num=10)],
    }

    model = RandomForestModel(table, schema, **params)
    model.train()
    model.evaluate()
    model.save(loc.format('saved_models'))

def _get_knot_spacing(self):
    """Returns a list of knot locations based on the spline parameters

    If the option `spacing` is
        'lin', uses linear spacing
        'log', uses log spacing

    Places 'spline_N' knots between 'spline_min' and 'spline_max'
    """
    space_key = self.get_option('spacing').lower()[:3]

    if space_key == 'log':
        vol = np.logspace(np.log10(self.get_option('spline_min')),
                          np.log10(self.get_option('spline_max')),
                          self.get_option('spline_N'))
    elif space_key == 'lin':
        vol = np.linspace(self.get_option('spline_min'),
                          self.get_option('spline_max'),
                          self.get_option('spline_N'))
    else:
        raise KeyError("{:} only `lin`ear and `log` spacing are"
                       " accepted".format(self.get_inform(1)))
    # end

    return vol

def plot():
    '''
    '''
    # Register the functions
    builtins.__dict__.update(globals())

    # Loop over various dataset sizes
    Narr = np.logspace(0, 5, 5)
    tpp = np.zeros_like(Narr)
    tbm = np.zeros_like(Narr)
    tps = np.zeros_like(Narr)
    for i, N in enumerate(Narr):
        tpp[i] = timeit.timeit('run_pp(%d)' % N, number=10) / 10.
        if batman is not None:
            tbm[i] = timeit.timeit('run_bm(%d)' % N, number=10) / 10.
        if ps is not None:
            tps[i] = timeit.timeit('run_ps(%d)' % N, number=10) / 10.

    pl.plot(Narr, tpp, '-o', label='planetplanet')
    if batman is not None:
        pl.plot(Narr, tbm, '-o', label='batman')
    if ps is not None:
        pl.plot(Narr, tps, '-o', label='pysyzygy')
    pl.legend()
    pl.yscale('log')
    pl.xscale('log')
    pl.ylabel('Time [seconds]', fontweight='bold')
    pl.xlabel('Number of datapoints', fontweight='bold')

def fit_koff(nmax=523, NN=4e8, **params):
    tbind = params.pop("tbind")
    params["kd"] = 1e9/tbind
    dx = params.pop("dx")
    rw = randomwalk.get_rw(NAME, params, setup=setup_rw, calc=True)
    rw.domains[1].dx = dx
    times = draw_empirically(rw, N=NN, nmax=nmax, success=False)
    bins = np.logspace(np.log10(min(times)), np.log10(max(times)), 35)
    #bins = np.logspace(-3., 2., 35)
    hist, _ = np.histogram(times, bins=bins)
    cfd = np.cumsum(hist)/float(np.sum(hist))
    t = 0.5*(bins[:-1] + bins[1:])
    tmean = times.mean()
    toff = NLS(t, cfd, t0=tmean)
    koff = 1./toff
    return dict(t=t, cfd=cfd, toff=toff, tmean=tmean, koff=koff)

##### run rw in collect mode and draw bindings from empirical distributions

def exponential_hist(times, a, b, **params):
    cutoff = 0.03  # cutoff frequency in ms
    if len(times) == 0:
        return
    bins = np.logspace(a, b, 100)
    hist = plt.hist(times, bins=bins, alpha=0.5, **params)
    plt.xscale("log")
    params.pop("label")
    color = params.pop("color")
    total = integrate_hist(hist, cutoff)
    if sum(times > cutoff) == 0:
        return
    tmean = times[times > cutoff].mean()
    T = np.logspace(a-3, b, 1000)
    fT = np.exp(-T/tmean)*T/tmean
    fT *= total/integrate_values(T, fT, cutoff)
    plt.plot(T, fT, label="exp. fit, mean = %.2f ms" % (tmean,),
             color="dark" + color, **params)
    plt.xlim(10**a, 10**b)

def _update_data_x(self):
    if self.is_zero_span():
        self._data_x = np.zeros(self.points)
        # data_x will be measured during first scan...
        return
    if self.logscale:
        raw_values = np.logspace(
            np.log10(self.start_freq),
            np.log10(self.stop_freq),
            self.points,
            endpoint=True)
    else:
        raw_values = np.linspace(self.start_freq,
                                 self.stop_freq,
                                 self.points,
                                 endpoint=True)
    values = np.zeros(len(raw_values))
    for index, val in enumerate(raw_values):
        values[index] = self.iq.__class__.frequency. \
            validate_and_normalize(self, val)  # retrieve the real freqs...
    self._data_x = values

def MieQ_withWavelengthRange(m, diameter, wavelengthRange=(100,1600), nw=1000, logW=False):
    # http://pymiescatt.readthedocs.io/en/latest/forward.html#MieQ_withWavelengthRange
    if type(m) == complex and len(wavelengthRange) == 2:
        if logW:
            wavelengths = np.logspace(np.log10(wavelengthRange[0]), np.log10(wavelengthRange[1]), nw)
        else:
            wavelengths = np.linspace(wavelengthRange[0], wavelengthRange[1], nw)
        _qD = [AutoMieQ(m, wavelength, diameter) for wavelength in wavelengths]
    elif type(m) in [np.ndarray, list, tuple] and len(wavelengthRange) == len(m):
        wavelengths = wavelengthRange
        _qD = [MieQ(emm, wavelength, diameter) for emm, wavelength in zip(m, wavelengths)]
    else:
        warnings.warn("Error: the size of the input data is mismatched. Please examine your inputs and try again.")
        return

    qext = np.array([q[0] for q in _qD])
    qsca = np.array([q[1] for q in _qD])
    qabs = np.array([q[2] for q in _qD])
    g = np.array([q[3] for q in _qD])
    qpr = np.array([q[4] for q in _qD])
    qback = np.array([q[5] for q in _qD])
    qratio = np.array([q[6] for q in _qD])

    return wavelengths, qext, qsca, qabs, g, qpr, qback, qratio

def MieQ_withSizeParameterRange(m, xRange=(1,10), nx=1000, logX=False):
    # http://pymiescatt.readthedocs.io/en/latest/forward.html#MieQ_withSizeParameterRange
    if logX:
        xValues = list(np.logspace(np.log10(xRange[0]), np.log10(xRange[1]), nx))
    else:
        xValues = list(np.linspace(xRange[0], xRange[1], nx))
    dValues = [1000*x/np.pi for x in xValues]
    _qD = [AutoMieQ(m, 1000, d) for d in dValues]

    qext = np.array([q[0] for q in _qD])
    qsca = np.array([q[1] for q in _qD])
    qabs = np.array([q[2] for q in _qD])
    g = np.array([q[3] for q in _qD])
    qpr = np.array([q[4] for q in _qD])
    qback = np.array([q[5] for q in _qD])
    qratio = np.array([q[6] for q in _qD])

    return xValues, qext, qsca, qabs, g, qpr, qback, qratio

def grid_search_gamma(rbf_svm, X, y):
    ## grid search - gamma only
    # use a full grid over all parameters
    param_grid = {'gamma': np.logspace(-15, 4, num=5000, base=2.0)}
    grid_search = GridSearchCV(rbf_svm, param_grid=param_grid,
                               scoring='roc_auc', cv=10,
                               pre_dispatch='2*n_jobs', n_jobs=-1)
    # re-fit on the whole training data
    grid_search.fit(X, y)
    grid_search_scores = [score[1] for score in grid_search.grid_scores_]
    print('Best parameters : {}'.format(grid_search.best_params_))
    print('Best score : {}'.format(grid_search.best_score_))

    # set canvas
    fig, ax = plt.subplots(1, 1)
    # ax.scatter(X[:, 0], X[:, 1], c = y)
    ax.plot(param_grid['gamma'], grid_search_scores)
    ax.set_title('AUC = f(gamma, C = 1.0)', fontsize='large')
    ax.set_xlabel('gamma', fontsize='medium')
    ax.set_ylabel('AUC', fontsize='medium')
    return fig

def test_cv():
    """Simple CV check."""
    # XXX: don't use scikit-learn for tests.
    X, y = make_regression()
    cv = KFold(X.shape[0], 5)

    glm_normal = GLM(distr='gaussian', alpha=0.01, reg_lambda=0.1)
    # check that it returns 5 scores
    scores = cross_val_score(glm_normal, X, y, cv=cv)
    assert_equal(len(scores), 5)

    param_grid = [{'alpha': np.linspace(0.01, 0.99, 2)},
                  {'reg_lambda': np.logspace(np.log(0.5), np.log(0.01), 10,
                                             base=np.exp(1))}]
    glmcv = GridSearchCV(glm_normal, param_grid, cv=cv)
    glmcv.fit(X, y)

def test_l1l2path():
    X_file = 'data_c/X_200_100.csv'
    Y_file = 'data_c/Y_200_100.csv'
    X = np.genfromtxt(X_file)
    Y = np.genfromtxt(Y_file)

    mu = 1e-3
    tau_range = np.logspace(-2, 0, 3)
    k_max = 10000
    tolerance = 1e-4

    pc = pplus.PPlusConnection(debug=False, workers_servers=('127.0.0.1',))
    pc.submit(l1l2path_job,
              args=(X, Y, mu, tau_range, k_max, tolerance),
              modules=('numpy as np', 'ctypes'))
    result_keys = pc.collect()

    print(result_keys)
    print("Done")

def nextfastpower(n):
    """Return the next integral power of small factors greater than the given
    number.  Specifically, return m such that
        m >= n
        m == 2**x * 3**y * 5**z
    where x, y, and z are integers.
    This is useful for ensuring fast FFT sizes.

    From https://gist.github.com/bhawkins/4479607 (Brian Hawkins)
    """
    if n < 7:
        return max(n, 1)
    # x, y, and z are all bounded from above by the formula of nextpower.
    # Compute all possible combinations for powers of 3 and 5.
    # (Not too many for reasonable FFT sizes.)
    def power_series(x, base):
        nmax = ceil(log(x) / log(base))
        return np.logspace(0.0, nmax, num=nmax+1, base=base)
    n35 = np.outer(power_series(n, 3.0), power_series(n, 5.0))
    n35 = n35[n35 <= n]
    # Lump the powers of 3 and 5 together and solve for the powers of 2.
    n2 = nextpower(n / n35)
    return int(min(n2 * n35))

def _generate_segments(self, n_segments, n_superpositions=5):
    # Assume that the actual surface is a superposition of sinusoid
    # functions, from which we sample n_segments points and connect
    # those linearly

    # Generate sinusoids of the form -5 * sin(a * x + b)
    a = np.logspace(0, 0.5, n_superpositions)
    b = (0.25 * self.random_state.rand(n_superpositions) - 0.125) * np.pi
    # Generate x and y components of segments
    x = np.hstack((np.sort(self.random_state.rand(n_segments) * 8.0)))
    y = (-5 * np.sin(a * x[:, None] + b)).mean(axis=1)
    # Start at (0, 0)
    x[0] = y[0] = 0
    # Planar segment at the end which is long enough to avoid shooting
    # over the border
    x[-1] = 100.0
    y[-1] = y[-2]
    return np.vstack((x, y)).T

def parameterChoosing(self):
    # Set the parameters by cross-validation
    tuned_parameters = [{'alpha': np.logspace(-5,5)}]

    reg = GridSearchCV(linear_model.Ridge(alpha=0.5), tuned_parameters,
                       cv=5, scoring='mean_squared_error')
    reg.fit(self.X_train, self.y_train)

    print("Best parameters set found on development set:\n")
    print(reg.best_params_)
    print("Grid scores on development set:\n")
    for params, mean_score, scores in reg.grid_scores_:
        print("%0.3f (+/-%0.03f) for %r\n" % (mean_score, scores.std() * 2, params))
    print(reg.scorer_)

    print("MSE for test data set:")
    y_true, y_pred = self.y_test, reg.predict(self.X_test)
    print(mean_squared_error(y_pred, y_true))

def parameterChoosing(self):
    # Set the parameters by cross-validation
    tuned_parameters = [{'kernel': ['rbf'],
                         'gamma': np.logspace(-4, 3, 30),
                         'C': [1e-3, 1e-2, 1e-1, 1, 10, 100, 1000]},
                        {'kernel': ['poly'],
                         'degree': [1, 2, 3, 4],
                         'C': [1e-3, 1e-2, 1e-1, 1, 10, 100, 1000],
                         'coef0': np.logspace(-4, 3, 30)},
                        {'kernel': ['linear'],
                         'C': [1e-3, 1e-2, 1e-1, 1, 10, 100, 1000]}]

    clf = GridSearchCV(svm.SVC(C=1), tuned_parameters, cv=5,
                       scoring='precision_weighted')
    clf.fit(self.X_train, self.y_train.ravel())

    print("Best parameters set found on development set:\n")
    print(clf.best_params_)
    print("Grid scores on development set:\n")
    for params, mean_score, scores in clf.grid_scores_:
        print("%0.3f (+/-%0.03f) for %r\n" % (mean_score, scores.std() * 2, params))

    print("Detailed classification report:\n")
    y_true, y_pred = self.y_test, clf.predict(self.X_test)
    print(classification_report(y_true, y_pred))

def b2t(tb, n=1e2, logger=None, **kwargs):
    tb = np.array(tb)
    if isinstance(tb, type(1.1)):
        return a2t(b2a(tb))
    if tb.shape == ():
        return a2t(b2a(tb))
    if len(tb) < n:
        n = len(tb)
    tbs = -1.*np.logspace(np.log10(-tb.min()), np.log10(-tb.max()), n)
    ages = []
    for i, tbi in enumerate(tbs):
        ages += a2t(b2a(tbi)),
        if logger:
            logger(i)
    ages = np.array(ages)
    return tbs, ages

def run(n_seeds, n_jobs, _run, _seed):
    seed_list = check_random_state(_seed).randint(np.iinfo(np.uint32).max,
                                                  size=n_seeds)
    exps = []
    exps += [{'method': 'sgd', 'step_size': step_size}
             for step_size in np.logspace(-3, 3, 7)]
    exps += [{'method': 'gram', 'reduction': reduction}
             for reduction in [1, 4, 6, 8, 12, 24]]

    rundir = join(basedir, str(_run._id), 'run')
    if not os.path.exists(rundir):
        os.makedirs(rundir)

    Parallel(n_jobs=n_jobs, verbose=10)(
        delayed(single_run)(config_updates, rundir, i)
        for i, config_updates in enumerate(exps))

def run(n_seeds, n_jobs, _run, _seed):
    seed_list = check_random_state(_seed).randint(np.iinfo(np.uint32).max,
                                                  size=n_seeds)
    exps = []
    exps += [{'method': 'sgd', 'step_size': step_size}
             for step_size in np.logspace(-7, -7, 1)]
    exps += [{'method': 'gram', 'reduction': reduction}
             for reduction in [12]]

    rundir = join(basedir, str(_run._id), 'run')
    if not os.path.exists(rundir):
        os.makedirs(rundir)

    Parallel(n_jobs=n_jobs, verbose=10)(
        delayed(single_run)(config_updates, rundir, i)
        for i, config_updates in enumerate(exps))

def create_matrix_sparse_from_conf(conf):
    restypes = ['tdnn', 'lpfb']
    # tdnn res
    weights = []
    if 'restype' not in conf or conf['restype'] not in restypes:
        return None
    else:
        if conf['restype'] == 'tdnn':
            w_ = spa.dia_matrix(np.diag(np.ones((conf['N']-1,)), k=-1))
            return w_
        elif conf['restype'] == 'lpfb':
            # w_ = spa.dia_matrix(np.diag(1 - (np.logspace(1e-3, 1e-1, conf['N']) - 1), k=0))
            w_ = spa.dia_matrix(np.diag(1 - np.exp(np.linspace(-6, -0.69, conf['N'])), k=0))
            return w_
    return None

################################################################################
# Standalone class for learning rules
# - Recursive Least Squares (RLS, depends on rlspy.py): the vanilla online
#   supervised reservoir training method
# - First-order reduced and controlled error or FORCE learning
#   (Sussillo & Abbott, 2012)
# - FORCEmdn: Mixture density output layer using FORCE rule (Berthold, 2017)
# - Exploratory Hebbian learning (Legenstein & others, 2010)

def selectFixedOrLog(self):
    """ Check fixed or log-linear asymmetry parameter """
    self.parameters['fixed_p'] = self.ui.radioButtonFixedP.isChecked()

    if self.parameters['fixed_p']:
        self.ui.radioButtonFixedP.setChecked(True)
        self.ui.radioButtonLogLinearP.setChecked(False)
        self.ui.frame_2.setEnabled(False)
        self.ui.frame.setEnabled(True)
        # self.p = lambda x: self.ui.spinBoxP.value()
        self.parameters['asym_param'] = self.ui.spinBoxP.value()
    else:
        self.ui.radioButtonFixedP.setChecked(False)
        self.ui.radioButtonLogLinearP.setChecked(True)
        self.ui.frame_2.setEnabled(True)
        self.ui.frame.setEnabled(False)
        self.parameters['asym_param'] = \
            lambda x: _np.logspace(_np.log10(self.parameters['asym_param_start']),
                                   _np.log10(self.parameters['asym_param_end']), x)
    self.changed.emit()

def getOpts(opts):
    print("config opts...")
    opts['validation'] = 0.1
    opts['exemplarSize'] = 127
    opts['instanceSize'] = 255-2*8
    opts['lossRPos'] = 16
    opts['lossRNeg'] = 0
    opts['labelWeight'] = 'balanced'
    opts['numPairs'] = 53200
    opts['frameRange'] = 100
    opts['trainNumEpochs'] = 50
    opts['trainLr'] = np.logspace(-2, -5, opts['trainNumEpochs'])
    opts['trainWeightDecay'] = 5e-04
    opts['randomSeed'] = 1
    opts['momentum'] = 0.9
    opts['stddev'] = 0.01
    opts['start'] = 0
    opts['expName'] = '_20170511_s_tn_001'
    opts['summaryFile'] = './data_20170511/' + opts['expName']
    opts['ckptPath'] = './ckpt/' + opts['expName']
    return opts

def getOpts(opts):
    print("config opts...")
    opts['validation'] = 0.1
    opts['exemplarSize'] = 127
    opts['instanceSize'] = 255-2*8
    opts['lossRPos'] = 16
    opts['lossRNeg'] = 0
    opts['labelWeight'] = 'balanced'
    opts['numPairs'] = 53200
    opts['frameRange'] = 100
    opts['trainNumEpochs'] = 50
    opts['trainLr'] = np.logspace(-2, -5, opts['trainNumEpochs'])
    opts['trainWeightDecay'] = 5e-04
    opts['randomSeed'] = 1
    opts['momentum'] = 0.9
    opts['stddev'] = 0.01
    opts['start'] = 0
    opts['expName'] = '20170518_tn_o_001'
    opts['summaryFile'] = './data_20170518/' + opts['expName']
    opts['ckptPath'] = './ckpt/' + opts['expName']
    return opts

def CAL_v(name, label_p, label_n, oracle, n_features, ftype, test_x, test_y):
    online = OnlineBase(name, label_p, label_n, oracle, n_features, ftype, error=.5)
    x, y = online.collect_pts(100, -1)
    i = 0
    q = online.get_n_query()

    C_range = np.logspace(-2, 5, 10, base=10)
    gamma_range = np.logspace(-5, 1, 10, base=10)
    param_grid = dict(gamma=gamma_range, C=C_range)

    while q < 3500:
        i += 1
        # h_ = ex.fit(x, y)
        cv = StratifiedShuffleSplit(y, n_iter=5, test_size=0.2, random_state=42)
        grid = GridSearchCV(svm.SVC(), param_grid=param_grid, cv=cv,
                            verbose=0, n_jobs=-1)
        grid.fit(x, y)
        h_ = grid.best_estimator_

        online_ = OnlineBase('', label_p, label_n, h_.predict, n_features,
                             ftype, error=.1)
        x_, _ = online_.collect_pts(10, 200)
        if x_ is not None and len(x_) > 0:
            x.extend(x_)
            y.extend(oracle(x_))
        q += online_.get_n_query()

        pred_y = h_.predict(test_x)
        print(len(x), q, sm.accuracy_score(test_y, pred_y))

def grid_retrain_in_f(self, n_dim=500):
    rbf_map = RBFSampler(n_dim, random_state=1)
    fourier_approx_svm = pipeline.Pipeline([("mapper", rbf_map),
                                            ("svm", LinearSVC())])

    # C_range = np.logspace(-5, 15, 21, base=2)
    # gamma_range = np.logspace(-15, 3, 19, base=2)
    # param_grid = dict(mapper__gamma=gamma_range, svm__C=C_range)
    # cv = StratifiedShuffleSplit(Y, n_iter=5, test_size=0.2, random_state=42)
    # grid = GridSearchCV(fourier_approx_svm, param_grid=param_grid, cv=cv)
    # grid.fit(X, Y)
    #
    # rbf_svc2 = grid.best_estimator_

    rbf_svc2 = fourier_approx_svm
    rbf_svc2.fit(self.X_ex, self.y_ex)

    self.set_clf2(rbf_svc2)
    return self.benchmark()

def grid_search(self):
    C_range = np.logspace(-5, 15, 21, base=2)
    param_grid = dict(C=C_range)
    cv = StratifiedShuffleSplit(self.y_ex, n_iter=5, test_size=0.2,
                                random_state=42)
    grid = GridSearchCV(SVC(kernel='poly', max_iter=10000),
                        param_grid=param_grid, cv=cv, n_jobs=1, verbose=0)
    logger.info('start grid search for Linear')
    grid.fit(self.X_ex, self.y_ex)
    logger.info('end grid search for Linear')

    scores = [x[1] for x in grid.grid_scores_]

    # final train
    clf = grid.best_estimator_
    pred_train = clf.predict(self.X_ex)
    pred_val = clf.predict(self.val_x)
    pred_test = clf.predict(self.test_x)

    r = Result(self.name + ' (X)', 'Poly', len(self.X_ex),
               sm.accuracy_score(self.y_ex, pred_train),
               sm.accuracy_score(self.val_y, pred_val),
               sm.accuracy_score(self.test_y, pred_test))
    return r

def wage_data_linear():
    X, y = wage()

    gam = LinearGAM(n_splines=10)
    gam.gridsearch(X, y, lam=np.logspace(-5, 3, 50))
    XX = generate_X_grid(gam)

    plt.figure()
    fig, axs = plt.subplots(1, 3)
    titles = ['year', 'age', 'education']

    for i, ax in enumerate(axs):
        ax.plot(XX[:, i], gam.partial_dependence(XX, feature=i+1))
        ax.plot(XX[:, i], *gam.partial_dependence(XX, feature=i+1, width=.95)[1],
                c='r', ls='--')
        if i == 0:
            ax.set_ylim(-30, 30)
        ax.set_title(titles[i])

    fig.tight_layout()
    plt.savefig('imgs/pygam_wage_data_linear.png', dpi=300)

def plot_pdf_log2(x, nbins=10, **kwargs):
    '''
    Adds a log-log PDF plot to the current axes. The PDF is binned with
    logarithmic binning of base 2.

    Arguments
    ---------
    x : array_like
        The data to plot
    nbins : integer
        The number of bins to take

    Additional keyword arguments are passed to `matplotlib.pyplot.loglog`.
    '''
    x = np.asarray(x)
    exp_max = np.ceil(np.log2(x.max()))
    bins = np.logspace(0, exp_max, exp_max + 1, base=2)
    ax = plt.gca()
    hist, _ = np.histogram(x, bins=bins)
    binsize = np.diff(np.asfarray(bins))
    hist = hist / binsize
    ax.loglog(bins[1:], hist, 'ow', **kwargs)
    return ax

def get_model():
    if FLAGS.model == 'logistic':
        return linear_model.LogisticRegressionCV(class_weight='balanced',
                                                 scoring='roc_auc',
                                                 n_jobs=FLAGS.n_jobs,
                                                 max_iter=10000,
                                                 verbose=1)
    elif FLAGS.model == 'random_forest':
        return ensemble.RandomForestClassifier(n_estimators=100,
                                               n_jobs=FLAGS.n_jobs,
                                               class_weight='balanced',
                                               verbose=1)
    elif FLAGS.model == 'svm':
        return grid_search.GridSearchCV(
            estimator=svm.SVC(kernel='rbf', gamma='auto', class_weight='balanced'),
            param_grid={'C': np.logspace(-4, 4, 10)},
            scoring='roc_auc',
            n_jobs=FLAGS.n_jobs,
            verbose=1)
    else:
        raise ValueError('Unrecognized model %s' % FLAGS.model)

def __init__(self, model, ax=None, alphas=None, cv=None, scoring=None, **kwargs):
    # Check to make sure this is not a "RegressorCV"
    name = model.__class__.__name__
    if name.endswith("CV"):
        raise YellowbrickTypeError((
            "'{}' is a CV regularization model;"
            " try AlphaSelection instead."
        ).format(name))

    # Call super to initialize the class
    super(ManualAlphaSelection, self).__init__(model, ax=ax, **kwargs)

    # Set manual alpha selection parameters
    self.alphas = alphas or np.logspace(-10, -2, 200)
    self.errors = None
    self.score_method = partial(cross_val_score, cv=cv, scoring=scoring)

def create_grid(self, abins=None, zbins=None):
    if abins is None and zbins is None:
        filenames = glob.glob(self.get_dirname()+'/%s_*.dat'%(self._prefix))
        data = np.array([self.filename2params(f) for f in filenames])
        if not len(data):
            msg = "No isochrone files found in: %s"%self.get_dirname()
            raise Exception(msg)
        arange = np.unique(data[:,0])
        zrange = np.unique(data[:,1])
    elif abins is not None and zbins is not None:
        # Age in units of Gyr
        arange = np.linspace(abins[0], abins[1], abins[2]+1)
        # Metallicity sampled logarithmically
        zrange = np.logspace(np.log10(zbins[0]), np.log10(zbins[1]), zbins[2]+1)
    else:
        msg = "Must specify both `abins` and `zbins` or neither"
        raise Exception(msg)

    aa, zz = np.meshgrid(arange, zrange)
    return aa.flatten(), zz.flatten()

def search_queue_params():
    df = []
    data_batch_sizes = np.logspace(0, 8, num=9, base=2, dtype=int)
    capacities = np.logspace(0, 12, num=13, base=2, dtype=int)
    nthreads = np.logspace(0, 5, num=6, base=2, dtype=int)
    for nth in nthreads:
        for data_batch_size in data_batch_sizes:
            for capacity in capacities:
                cap = nth * capacity
                tf.reset_default_graph()
                d = DataHDF5(batch_size=data_batch_size)
                queue = data.Queue(d.node, d,
                                   queue_type='fifo',
                                   batch_size=BATCH_SIZE,
                                   capacity=cap,
                                   n_threads=nth)
                queue.kind = '{} / {} / {}'.format(nth, data_batch_size, capacity)
                durs = time_tf(queue)
                durs['data batch size'] = data_batch_size
                durs['queue capacity'] = cap
                durs['nthreads'] = nth
                df.append(durs)
                d.cleanup()
    df = pandas.concat(df, ignore_index=True)
    df.kind = df.kind.astype('category', ordered=True, categories=df.kind.unique())
    df.to_pickle('/home/qbilius/mh17/computed/search_queue_params.pkl')
    print(df.groupby(['nthreads', 'data batch size', 'queue capacity']).dur.mean())

def scaled_histogram(data, num_points, scale):
    if scale == 'linear':
        hist, edges = np.histogram(data, bins=max([5, int(num_points/50)]))
    else:
        # Conditional catches an empty data input.
        h1, h2 = ((np.log10(min(data)), np.log10(max(data)))
                  if len(data) > 0 else (0, 1))
        hist, edges = np.histogram(data, bins=np.logspace(
            h1, h2, 1 + max([5, int(num_points/50)])))
    hist_max = max(hist)*1.1
    return hist, edges, hist_max

def bernoulli_gaussian_trial(M=250, N=500, L=1000, pnz=.1, kappa=None, SNR=40):
    A = np.random.normal(size=(M, N), scale=1.0 / math.sqrt(M)).astype(np.float32)
    if kappa is not None and kappa >= 1:
        # create a random operator with a specific condition number
        U, _, V = la.svd(A, full_matrices=False)
        s = np.logspace(0, np.log10(1/kappa), M)
        A = np.dot(U*(s*np.sqrt(N)/la.norm(s)), V).astype(np.float32)
    A_ = tf.constant(A, name='A')
    prob = TFGenerator(A=A, A_=A_, pnz=pnz, kappa=kappa, SNR=SNR)
    prob.name = 'Bernoulli-Gaussian, random A'

    bernoulli_ = tf.to_float(tf.random_uniform((N, L)) < pnz)
    xgen_ = bernoulli_ * tf.random_normal((N, L))
    noise_var = pnz*N/M * math.pow(10., -SNR / 10.)
    ygen_ = tf.matmul(A_, xgen_) + tf.random_normal((M, L), stddev=math.sqrt(noise_var))

    prob.xval = ((np.random.uniform(0, 1, (N, L)) < pnz) *
                 np.random.normal(0, 1, (N, L))).astype(np.float32)
    prob.yval = np.matmul(A, prob.xval) + np.random.normal(0, math.sqrt(noise_var), (M, L))
    prob.xinit = ((np.random.uniform(0, 1, (N, L)) < pnz) *
                  np.random.normal(0, 1, (N, L))).astype(np.float32)
    prob.yinit = np.matmul(A, prob.xinit) + np.random.normal(0, math.sqrt(noise_var), (M, L))
    prob.xgen_ = xgen_
    prob.ygen_ = ygen_
    prob.noise_var = noise_var

    return prob

def test_gradE(adjcube):
    """Tests the gradient of `E` using finite difference methods.
    """
    from pydft.bases.fourier import gradE, E
    from numpy.matlib import randn
    cell = adjcube
    V = QHO(cell)
    Ns = 4
    # He set the random seed; we could do the same, but the
    # implementation is probably different between numpy and matlab:
    # randn('seed', 0.2004)
    W = np.array(randn(np.prod(cell.S), Ns) + 1j*randn(np.prod(cell.S), Ns))

    # Compute initial energy and gradient
    E0 = E(V, W, cell)
    g0 = gradE(V, W, cell)

    # Choose a random direction to explore
    dW = np.array(randn(W.shape) + 1j*randn(W.shape))

    # Explore a range of step sizes decreasing by powers of ten
    steps = np.logspace(np.log10(1e-3), np.log10(1e-7), 8)
    for delta in steps:
        # Directional derivative formula
        dE = 2*np.real(np.trace(np.dot(g0.conjugate().T, delta*dW)))
        # Print ratio of actual change to expected change, along with estimate
        # of the error in this quantity due to rounding
        ratio = abs(1.-(E(V, W+delta*dW, cell)-E0)/dE)
        print(int(np.log10(ratio)), int(np.log10(delta)), ratio)
        assert abs(int(np.log10(ratio)) - int(np.log10(delta))) <= 2

def fcn_FDEM_InductionSpherePlaneWidget(xtx,ytx,ztx,m,orient,x0,y0,z0,a,sig,mur,xrx,yrx,zrx,logf,Comp,Phase):
    sig = 10**sig
    f = 10**logf

    fvec = np.logspace(0,8,41)

    xmin, xmax, dx, ymin, ymax, dy = -30., 30., 0.3, -30., 30., 0.4
    X,Y = np.mgrid[xmin:xmax+dx:dx, ymin:ymax+dy:dy]
    X = np.transpose(X)
    Y = np.transpose(Y)

    Obj = SphereFEM(m,orient,xtx,ytx,ztx)

    Hx,Hy,Hz,Habs = Obj.fcn_ComputeFrequencyResponse(f,sig,mur,a,x0,y0,z0,X,Y,zrx)
    Hxi,Hyi,Hzi,Habsi = Obj.fcn_ComputeFrequencyResponse(fvec,sig,mur,a,x0,y0,z0,xrx,yrx,zrx)

    fig1 = plt.figure(figsize=(17,6))
    Ax1 = fig1.add_axes([0.04,0,0.43,1])
    Ax2 = fig1.add_axes([0.6,0,0.4,1])

    if Comp == 'x':
        Ax1 = plotAnomalyXYplane(Ax1,f,X,Y,ztx,Hx,Comp,Phase)
        Ax1 = plotPlaceTxRxSphereXY(Ax1,xtx,ytx,xrx,yrx,x0,y0,a)
        Ax2 = plotResponseFEM(Ax2,f,fvec,Hxi,Comp)
    elif Comp == 'y':
        Ax1 = plotAnomalyXYplane(Ax1,f,X,Y,ztx,Hy,Comp,Phase)
        Ax1 = plotPlaceTxRxSphereXY(Ax1,xtx,ytx,xrx,yrx,x0,y0,a)
        Ax2 = plotResponseFEM(Ax2,f,fvec,Hyi,Comp)
    elif Comp == 'z':
        Ax1 = plotAnomalyXYplane(Ax1,f,X,Y,ztx,Hz,Comp,Phase)
        Ax1 = plotPlaceTxRxSphereXY(Ax1,xtx,ytx,xrx,yrx,x0,y0,a)
        Ax2 = plotResponseFEM(Ax2,f,fvec,Hzi,Comp)
    elif Comp == 'abs':
        Ax1 = plotAnomalyXYplane(Ax1,f,X,Y,ztx,Habs,Comp,Phase)
        Ax1 = plotPlaceTxRxSphereXY(Ax1,xtx,ytx,xrx,yrx,x0,y0,a)
        Ax2 = plotResponseFEM(Ax2,f,fvec,Habsi,Comp)

    plt.show(fig1)

def calc_IndCurrent_TD_offtime(self):
    """Gives TD induced current off-time decay"""
    # INITIALIZE ATTRIBUTES
    Bpx = self.Bpx
    Bpz = self.Bpz
    a2 = self.a2
    azm = np.pi*self.azm/180.
    R = self.R
    L = self.L

    t = np.logspace(-6,0,101)

    Ax = np.pi*a2**2*np.sin(azm)
    Az = np.pi*a2**2*np.cos(azm)

    Phi = (Ax*Bpx + Az*Bpz)
    Is = (Phi/L)*np.exp(-(R/L)*t)
    V = (Phi*R/L)*np.exp(-(R/L)*t) - (Phi*R/L**2)*np.exp(-(R/L)*t)

    return V, Is

###########################################
#    PLOTTING FUNCTIONS
###########################################

def plot_InducedCurrent_FD(self, Ax, Is, fi):
    FS = 20

    R = self.R
    L = self.L
    Imax = np.max(-np.real(Is))

    f = np.logspace(0,8,101)

    Ax.grid('both', linestyle='-', linewidth=0.8, color=[0.8, 0.8, 0.8])
    Ax.semilogx(f, -np.real(Is), color='k', linewidth=4, label="$I_{Re}$")
    Ax.semilogx(f, -np.imag(Is), color='k', ls='--', linewidth=4, label="$I_{Im}$")
    Ax.semilogx(fi*np.array([1.,1.]), np.array([0, 1.1*Imax]), color='r', ls='-', linewidth=3)
    handles, labels = Ax.get_legend_handles_labels()
    Ax.legend(handles, labels, loc='upper left', fontsize=FS)
    Ax.set_xlabel('Frequency [Hz]', fontsize=FS+2)
    Ax.set_ylabel('$\mathbf{- \, I_s (\omega)}$ [A]', fontsize=FS+2, labelpad=-10)
    Ax.set_title('Frequency Response', fontsize=FS)
    Ax.set_ybound(0, 1.1*Imax)
    Ax.tick_params(labelsize=FS-2)
    Ax.yaxis.set_major_formatter(FormatStrFormatter('%.1e'))

    #R_str = '{:.3e}'.format(R)
    #L_str = '{:.3e}'.format(L)
    #f_str = '{:.3e}'.format(fi)
    #EMF_str = '{:.2e}j'.format(EMFi.imag)
    #I_str = '{:.2e} - {:.2e}j'.format(float(np.real(Isi)), np.abs(float(np.imag(Isi))))

    #Ax.text(1.4, 1.01*Imax, '$R$ = '+R_str+' $\Omega$', fontsize=FS)
    #Ax.text(1.4, 0.94*Imax, '$L$ = '+L_str+' H', fontsize=FS)
    #Ax.text(1.4, 0.87*Imax, '$f$ = '+f_str+' Hz', fontsize=FS, color='r')
    #Ax.text(1.4, 0.8*Imax, '$V$ = '+EMF_str+' V', fontsize=FS, color='r')
    #Ax.text(1.4, 0.73*Imax, '$I_s$ = '+I_str+' A', fontsize=FS, color='r')

    return Ax

def plot_InducedCurrent_TD(self, Ax, Is, ti, Vi, Isi):
    FS = 20

    R = self.R
    L = self.L
    Imax = np.max(Is)

    t = np.logspace(-6,0,101)

    Ax.grid('both', linestyle='-', linewidth=0.8, color=[0.8, 0.8, 0.8])
    Ax.semilogx(t, Is, color='k', linewidth=4)
    Ax.semilogx(ti*np.array([1.,1.]), np.array([0, 1.3*Imax]), color='r', ls='-', linewidth=3)
    Ax.set_xlabel('Time [s]', fontsize=FS+2)
    Ax.set_ylabel('$\mathbf{I_s (\omega)}$ [A]', fontsize=FS+2, labelpad=-10)
    Ax.set_title('Transient Induced Current', fontsize=FS)
    Ax.set_ybound(0, 1.2*Imax)
    Ax.tick_params(labelsize=FS-2)
    Ax.yaxis.set_major_formatter(FormatStrFormatter('%.1e'))

    #R_str = '{:.3e}'.format(R)
    #L_str = '{:.3e}'.format(L)
    #t_str = '{:.3e}'.format(ti)
    #V_str = '{:.3e}'.format(Vi)
    #I_str = '{:.3e}'.format(Isi)

    #Ax.text(1.4e-6, 1.12*Imax, '$R$ = '+R_str+' $\Omega$', fontsize=FS)
    #Ax.text(1.4e-6, 1.04*Imax, '$L$ = '+L_str+' H', fontsize=FS)
    #Ax.text(4e-2, 1.12*Imax, '$t$ = '+t_str+' s', fontsize=FS, color='r')
    #Ax.text(4e-2, 1.04*Imax, '$V$ = '+V_str+' V', fontsize=FS, color='r')
    #Ax.text(4e-2, 0.96*Imax, '$I_s$ = '+I_str+' A', fontsize=FS, color='r')

    return Ax

def __init__(self):
    self.genMesh()
    self.getCoreDomain()
    # url = "http://em.geosci.xyz/_images/disc_dipole.png"
    # response = requests.get(url)
    # self.im = Image.open(StringIO(response.content))
    self.time = np.logspace(-5, -2, 41)

def test_from_float_hex(self):
    # IEEE doubles and floats only, otherwise the float32
    # conversion may fail.
    tgt = np.logspace(-10, 10, 5).astype(np.float32)
    tgt = np.hstack((tgt, -tgt)).astype(np.float)
    inp = '\n'.join(map(float.hex, tgt))
    c = TextIO()
    c.write(inp)
    for dt in [np.float, np.float32]:
        c.seek(0)
        res = np.loadtxt(c, dtype=dt)
        assert_equal(res, tgt, err_msg="%s" % dt)

def FrequencyAxis(Fmin, Fmax, Fnum, Log=True):
    """
    Compute a lin/log spaced frequency axis.
    """

    # Computing frequency axis
    if Log:
        Freq = _np.logspace(_np.log10(Fmin), _np.log10(Fmax), Fnum)
    else:
        Freq = _np.linspace(Fmin, Fmax, Fnum)

    return Freq

def test_call(self):
    """Test that the bump EOS can be called
    """
    bump_eos = EOSBump()
    vol = np.logspace(np.log10(.1), np.log10(1), 50)
    pressure = bump_eos(vol)

    self.assertIsInstance(pressure, np.ndarray)
    self.assertEqual(50, len(pressure))

def test_derivative(self):
    """Test the derivative function
    """
    bump_eos = EOSBump()
    vol = np.logspace(np.log10(.1), np.log10(1), 50)
    pressure = bump_eos.derivative()(vol)

    self.assertIsInstance(pressure, np.ndarray)
    self.assertEqual(50, len(pressure))

def test_bad_derivative(self):
    """Tests that derivative errors are caught
    """
    bump_eos = EOSBump()
    vol = np.logspace(np.log10(.1), np.log10(1), 50)
    with self.assertRaises(IOError):
        pressure = bump_eos.derivative(order=2)(vol)
    # end

p_fun = lambda v: 2.56e9 / v**3