The following 50 code examples, extracted from open-source Python projects, illustrate how to use numpy.Inf. (Note that numpy.Inf is a float constant, not a callable.)
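Before the project excerpts, here is a minimal standalone sketch of the patterns they rely on, assuming NumPy < 2.0 (the capitalized alias np.Inf was removed in NumPy 2.0 in favor of the lowercase np.inf):

import numpy as np

# np.Inf is an alias for IEEE 754 positive infinity, identical to np.inf.
print(np.Inf == np.inf == float('inf'))  # True

# Common sentinel pattern seen throughout the examples below:
# initialize "best so far" to +Inf so any finite score improves on it.
best = np.Inf
for loss in [0.9, 0.4, 0.6]:
    if loss < best:
        best = loss
print(best)  # 0.4

# Infinity behaves predictably in comparisons and arithmetic:
print(-np.Inf < 0 < np.Inf)               # True
print(1.0 / np.Inf)                       # 0.0
print(np.isinf(np.array([1.0, np.Inf])))  # [False  True]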
def _ncc_c(x, y):
    """
    >>> _ncc_c([1,2,3,4], [1,2,3,4])
    array([ 0.13333333,  0.36666667,  0.66666667,  1.        ,  0.66666667,
            0.36666667,  0.13333333])
    >>> _ncc_c([1,1,1], [1,1,1])
    array([ 0.33333333,  0.66666667,  1.        ,  0.66666667,  0.33333333])
    >>> _ncc_c([1,2,3], [-1,-1,-1])
    array([-0.15430335, -0.46291005, -0.9258201 , -0.77151675, -0.46291005])
    """
    den = np.array(norm(x) * norm(y))
    den[den == 0] = np.Inf  # avoid division by zero: zero-norm inputs yield 0
    x_len = len(x)
    fft_size = 1 << (2 * x_len - 1).bit_length()
    cc = ifft(fft(x, fft_size) * np.conj(fft(y, fft_size)))
    cc = np.concatenate((cc[-(x_len - 1):], cc[:x_len]))
    return np.real(cc) / den
def initialize(self, length=None):
    """see ``__init__``"""
    if length is None:
        length = len(self.bounds)
    max_i = min((len(self.bounds) - 1, length - 1))
    self._lb = array([self.bounds[min((i, max_i))][0]
                      if self.bounds[min((i, max_i))][0] is not None
                      else -np.Inf
                      for i in range(length)], copy=False)
    self._ub = array([self.bounds[min((i, max_i))][1]
                      if self.bounds[min((i, max_i))][1] is not None
                      else np.Inf
                      for i in range(length)], copy=False)
    lb = self._lb
    ub = self._ub
    # define added values for lower and upper bound
    self._al = array([min([(ub[i] - lb[i]) / 2, (1 + np.abs(lb[i])) / 20])
                      if isfinite(lb[i]) else 1 for i in rglen(lb)], copy=False)
    self._au = array([min([(ub[i] - lb[i]) / 2, (1 + np.abs(ub[i])) / 20])
                      if isfinite(ub[i]) else 1 for i in rglen(ub)], copy=False)
def SLcomputeSNR(X, Xnoisy):
    """
    SLcomputeSNR Compute signal to noise ratio (SNR).

    Usage:

        SNR = SLcomputeSNR(X, Xnoisy)

    Input:

        X:      2D or 3D signal.
        Xnoisy: 2D or 3D noisy signal.

    Output:

        SNR: The signal to noise ratio (in dB).
    """
    if np.linalg.norm(X - Xnoisy) == 0:
        return np.Inf
    else:
        return 10 * np.log10(np.sum(np.power(X, 2)) /
                             np.sum(np.power(X - Xnoisy, 2)))
def __init__(self, shape, z0rep_axes=(0,), z1rep_axes=(0,), map_est=False):
    Estim.__init__(self)
    self.shape = shape
    ndim = len(shape)
    if z0rep_axes == 'all':
        z0rep_axes = tuple(range(ndim))
    if z1rep_axes == 'all':
        z1rep_axes = tuple(range(ndim))
    self.z0rep_axes = z0rep_axes
    self.z1rep_axes = z1rep_axes
    self.cost_avail = True
    self.map_est = map_est

    # Initial variances
    self.zvar0_init = np.Inf
    self.zvar1_init = np.Inf
def __init__(self, y, shape, zrep_axes=(0,), thresh=0, perr=1e-6,
             var_init=np.Inf):
    Estim.__init__(self)
    self.y = y
    self.shape = shape
    self.thresh = thresh
    self.perr = perr
    self.cost_avail = True
    self.var_init = var_init

    # Set the repetition axes
    ndim = len(self.shape)
    if zrep_axes == 'all':
        zrep_axes = tuple(range(ndim))
    self.zrep_axes = zrep_axes
def _reset(self):
    """Resets wait counter and cooldown counter."""
    if self.mode not in ['auto', 'min', 'max']:
        warnings.warn('Learning Rate Plateau Reducing mode %s is unknown, '
                      'fallback to auto mode.' % (self.mode), RuntimeWarning)
        self.mode = 'auto'
    if (self.mode == 'min' or
            (self.mode == 'auto' and 'acc' not in self.monitor)):
        self.monitor_op = lambda a, b: np.less(a, b - self.epsilon)
        self.best = np.Inf
    else:
        self.monitor_op = lambda a, b: np.greater(a, b + self.epsilon)
        self.best = -np.Inf
    self.cooldown_counter = 0
    self.wait = 0
    self.lr_epsilon = self.min_lr * 1e-4
def test_invalid_nbins():
    with raises(ValueError):
        ew = graynet.extract(subject_id_list, fs_dir, num_bins=np.NaN)

    with raises(ValueError):
        ew = graynet.extract(subject_id_list, fs_dir, num_bins=np.Inf)

    with raises(ValueError):
        ew = graynet.extract(subject_id_list, fs_dir, num_bins=2)

# test_multi_edge()
# test_multi_edge_CLI()
# test_empty_subject_list()
# test_run_no_IO()
# test_run_roi_stats_via_API()
# test_run_roi_stats_via_CLI()
# test_CLI_only_weight_or_stats()
def initialize(self, length=None):
    """see ``__init__``"""
    if length is None:
        length = len(self.bounds)
    max_i = min((len(self.bounds) - 1, length - 1))
    self._lb = array([self.bounds[min((i, max_i))][0]
                      if self.bounds[min((i, max_i))][0] is not None
                      else -np.Inf
                      for i in xrange(length)], copy=False)
    self._ub = array([self.bounds[min((i, max_i))][1]
                      if self.bounds[min((i, max_i))][1] is not None
                      else np.Inf
                      for i in xrange(length)], copy=False)
    lb = self._lb
    ub = self._ub
    # define added values for lower and upper bound
    self._al = array([min([(ub[i] - lb[i]) / 2, (1 + np.abs(lb[i])) / 20])
                      if isfinite(lb[i]) else 1 for i in rglen(lb)], copy=False)
    self._au = array([min([(ub[i] - lb[i]) / 2, (1 + np.abs(ub[i])) / 20])
                      if isfinite(ub[i]) else 1 for i in rglen(ub)], copy=False)
def update(self, decision):
    for context in self.contexts:
        if decision in self.contexts[context]:
            self.contexts_scores[context] += eta + np.random.randn() * 1e-5

    # special condition for names:
    if decision in women_names:
        self.women_names_score = np.Inf
        self.men_names_score = 0.
        self.robots_names_score = -1.
    if decision in men_names:
        self.women_names_score = 0.
        self.men_names_score = np.Inf
        self.robots_names_score = -1.
    if decision in robots_names:
        self.women_names_score = np.random.randn() * 1e-5
        self.men_names_score = np.random.randn() * 1e-5
        self.robots_names_score = np.Inf

    self.most_likely_context = max(self.contexts_scores.iteritems(),
                                   key=operator.itemgetter(1))[0]
    self.less_likely_context = min(self.contexts_scores.iteritems(),
                                   key=operator.itemgetter(1))[0]
def update(self, decision, weight):
    for context in CONTEXTS:
        if decision in CONTEXTS[context]:
            self.contexts_scores[context] += weight + np.random.randn() * 1e-5

    # special condition for names:
    if decision in women_names:
        self.women_names_score = np.Inf
        self.men_names_score = 0.
        self.robots_names_score = -1.
    if decision in men_names:
        self.women_names_score = 0.
        self.men_names_score = np.Inf
        self.robots_names_score = -1.
    if decision in robots_names:
        self.women_names_score = np.random.randn() * 1e-5
        self.men_names_score = np.random.randn() * 1e-5
        self.robots_names_score = np.Inf

    self.most_likely_context = max(self.contexts_scores.iteritems(),
                                   key=operator.itemgetter(1))[0]
    self.less_likely_context = min(self.contexts_scores.iteritems(),
                                   key=operator.itemgetter(1))[0]
def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)
    # define required options
    if 'pBins' not in self.opt:
        self.opt['pBins'] = 10
    if 'inertia' not in self.opt:
        self.opt['inertia'] = 0.5
    if 'c1' not in self.opt:
        self.opt['c1'] = 0.6
    if 'c2' not in self.opt:
        self.opt['c2'] = 0.3
    if 'c3' not in self.opt:
        self.opt['c3'] = 0.001
    if self.opt['c3'] == 0:
        self.opt['c3'] = 1E-5

    # define required variables
    self.pBestIdxs = np.arange(self.opt['population'], dtype=np.int32)
    self.gBestIdxs = np.arange(self.opt['population'], dtype=np.int32)
    self.velocities = np.zeros((self.opt['population'], self.opt['nVars']))
    tmp = np.hstack((-np.Inf, np.linspace(0, 1, num=self.opt['pBins'])))
    self.pBins = np.vstack((tmp[0:-1], tmp[1:]))
def _reset(self):
    """Resets wait counter and cooldown counter."""
    if self.mode not in ['auto', 'min', 'max']:
        logging.warning('Learning Rate Plateau Reducing mode %s is unknown, '
                        'fallback to auto mode.' % (self.mode))
        self.mode = 'auto'
    if (self.mode == 'min' or
            (self.mode == 'auto' and 'acc' not in self.monitor)):
        self.monitor_op = lambda a, b: np.less(a, b - self.epsilon)
        self.best = np.Inf
    else:
        self.monitor_op = lambda a, b: np.greater(a, b + self.epsilon)
        self.best = -np.Inf
    self.cooldown_counter = 0
    self.wait = 0
    self.lr_epsilon = self.min_lr * 1e-4
def _get_best_trial(filename, cut=None):
    try:
        fh = open(filename, "r")
        trials = cPickle.load(fh)
        fh.close()

        current_best = numpy.Inf
        best_idx = 0
        if cut is None:
            cut = len(trials['trials'])
        print filename, "#Trials", len(trials['trials'])
        for i, trial in enumerate(trials['trials'][:cut]):
            result = trial['result']
            if result < current_best:
                best_idx = i
                current_best = result
        if current_best == numpy.Inf:
            raise Exception("%s does not contain any results" % filename)
        return current_best, best_idx
    except Exception as e:
        print "Problem with ", filename, e
        sys.stdout.flush()
        return None, None

# TODO: Don't know whether this is still needed
def plot_all_times_to_correct_decision(self, thr=0.5, stay_above=True,
                                       unit="spikes",
                                       spikemeasure="growing_spikecount",
                                       do_title=True):
    times = np.array([self.time_to_correct_decision(e, thr, stay_above, unit, spikemeasure)
                      for e in self.experiments]).flatten()
    # times[30:50] = np.Inf
    maximum = int(np.ceil(max(times[times != np.Inf])))
    plt_inf = maximum + 2
    # for unsuccessful trials (time=Inf), set time to some value distinct
    # from any actual decision time.
    times[times == np.Inf] = plt_inf
    fig = plt.figure(figsize=(hcPlotting.fig_width, hcPlotting.fig_height / 3))
    bins = np.hstack([np.arange(0.25, maximum + 1, 0.5), [plt_inf, plt_inf + 1]])
    n, _, _ = plt.hist(times, bins, color='k', edgecolor='w')
    ax = plt.gca()
    ax.set_xlim((0, plt_inf + 1))
    ax.set_ylim(ax.get_ylim()[0], ax.get_ylim()[1] + 1)
    plt.plot((plt_inf, plt_inf), (0, ax.get_ylim()[1]), 'r')
    ax.set_xticks(range(maximum + 1) + [plt_inf + 0.5])
    ax.set_xticklabels([str(i) for i in range(maximum + 1)] + [r'$\infty$'])
    ax.set_ylabel("nr. of trials")
    ax.set_xlabel("spikes observed before classification")
    if do_title:
        plt.title("thr = " + str(thr) + ", stay_above = " + str(stay_above) +
                  ", classes: " + " vs. ".join(self.classes))
def __init__(self, monitor='val_loss', patience=0, verbose=0, mode='auto'):
    super(Callback, self).__init__()

    self.monitor = monitor
    self.patience = patience
    self.verbose = verbose
    self.wait = 0
    self.best_epoch = 0

    if mode == 'min':
        self.monitor_op = np.less
        self.best = np.Inf
    elif mode == 'max':
        self.monitor_op = np.greater
        self.best = -np.Inf
    else:
        if 'acc' in self.monitor:
            self.monitor_op = np.greater
            self.best = -np.Inf
        else:
            self.monitor_op = np.less
            self.best = np.Inf
def __init__(self, monitor='val_loss', mode='auto', verbose=0):
    super(BestWeight, self).__init__()
    self.monitor = monitor
    self.mode = mode
    self.best_weights = None
    self.verbose = verbose

    if mode == 'min':
        self.monitor_op = np.less
        self.best = np.Inf
    elif mode == 'max':
        self.monitor_op = np.greater
        self.best = -np.Inf
    else:
        if 'acc' in self.monitor:
            self.monitor_op = np.greater
            self.best = -np.Inf
        else:
            self.monitor_op = np.less
            self.best = np.Inf
def process_questions(C, all_words, n):
    scores = dist_function[0](C[:, all_words], W[all_words, :], W2)
    worst = -dist_function[1] * numpy.Inf
    for i in range(C.shape[0]):
        scores[i, C[i, :].nonzero()[1]] = worst
    if dist_function[1] > 0:
        hits = scores.argpartition(-n, axis=1)[:, -n:]
        answers = [sorted(hits[i], key=lambda hit: scores[i, hit], reverse=True)
                   for i in range(len(hits))]
    else:
        hits = scores.argpartition(n, axis=1)[:, :n]
        answers = [sorted(hits[i], key=lambda hit: scores[i, hit], reverse=False)
                   for i in range(len(hits))]
    if args.log_level > 1:
        small_scores = [scores[i, answers[i]] for i in xrange(hits.shape[0])]
    else:
        small_scores = None
    return answers, small_scores
def __init__(self, filepath, monitor='val_loss', verbose=0,
             save_best_only=False, mode='auto'):
    super(Callback, self).__init__()
    self.monitor = monitor
    self.verbose = verbose
    self.filepath = filepath
    self.save_best_only = save_best_only

    if mode not in ['auto', 'min', 'max']:
        # note: the warning must reference the local `mode`; `self.mode`
        # is never assigned in this __init__
        warnings.warn("ModelCheckpoint mode %s is unknown, "
                      "fallback to auto mode" % (mode), RuntimeWarning)
        mode = 'auto'

    if mode == "min":
        self.monitor_op = np.less
        self.best = np.Inf
    elif mode == "max":
        self.monitor_op = np.greater
        self.best = -np.Inf
    else:
        if "acc" in self.monitor:
            self.monitor_op = np.greater
            self.best = -np.Inf
        else:
            self.monitor_op = np.less
            self.best = np.Inf
def parse_location_string(loc_string):
    """
    Parse a UCSC format location string (e.g. "chr2:1000-1100") and return
    an interval tuple in the format ('chr2', 1000, 1100).

    :param loc_string: Input location string
    :type loc_string: :ref:`location string <location_string>`
    :returns: (chromosome name, start coordinate, stop coordinate)
    """
    chrom_fields = loc_string.split(':')
    chrom = chrom_fields[0]
    if len(chrom_fields) == 1:
        start, stop = 0, np.Inf
    else:
        pos_fields = chrom_fields[1].split('-')
        start, stop = (int(pos.replace(",", "")) for pos in pos_fields)
    return chrom, start, stop
def __init__(self, monitor='val_loss', cut_ratio=0.5, patience=2,
             scheduled_start_epoch=1, scheduled_cut_ratio=1.):
    """
    Args:
        monitor: quantity to be monitored.
        cut_ratio: cut the learning rate by this ratio.
        patience: number of epochs with no improvement after which training
            will be stopped.
        scheduled_start_epoch: epoch from which to apply the scheduled
            learning rate discount.
        scheduled_cut_ratio: learning rate discount ratio.
    """
    super(Callback, self).__init__()
    self.monitor = monitor
    self.patience = patience
    self.best = np.Inf
    self.wait = 0
    self.cut_ratio = cut_ratio
    self.monitor_decrease = False
    self.scheduled_start_epoch = scheduled_start_epoch
    self.scheduled_cut_ratio = scheduled_cut_ratio
def _get_bounds(self, ib, dimension):
    """ib == 0/1 means lower/upper bound, return a vector of length `dimension`"""
    sign_ = 2 * ib - 1
    assert sign_**2 == 1
    if self.bounds is None or self.bounds[ib] is None:
        return np.array(dimension * [sign_ * np.Inf])
    res = []
    for i in range(dimension):
        res.append(self.bounds[ib][min([i, len(self.bounds[ib]) - 1])])
        if res[-1] is None:
            res[-1] = sign_ * np.Inf
    return np.array(res)
def solve_static(self, F, up_, Dirichlet_bcs_up):
    # Solve the stationary Navier-Stokes problem with the Picard method;
    # other methods may be more accurate and faster.
    iter_ = 0
    max_iter = 50
    eps = 1.0
    tol = 1E-3
    under_relax_ratio = 0.7
    up_temp = Function(self.function_space)  # temporary to save values in the Picard loop

    timer_solver = Timer("TimerSolveStatic")
    timer_solver.start()
    while (iter_ < max_iter and eps > tol):
        # solve the linear Stokes flow to avoid up_s = 0
        up_temp.assign(up_)
        # other solving methods
        up_ = self.solve_linear_problem(F, up_, Dirichlet_bcs_up)
        # up_s = self.solve_amg(F, Dirichlet_bcs_up, up_s)  # AMG is not working with mixed function space

        diff_up = up_.vector().array() - up_temp.vector().array()
        eps = np.linalg.norm(diff_up, ord=np.Inf)  # max-norm of the update as convergence measure

        print("iter = {:d}; eps_up = {:e}; time elapsed = {}\n".format(
            iter_, eps, timer_solver.elapsed()))

        # under-relaxation (Courant number) should be defined here
        up_.vector()[:] = up_temp.vector().array() + diff_up * under_relax_ratio

        iter_ += 1
    # end of Picard loop
    timer_solver.stop()

    print("*" * 10 + " end of Navier-Stokes equation iteration " + "*" * 10)
    return up_
def gen(N, df, thinning=1):
    log_den = log_normal
    if df < np.Inf:
        log_den = grad_log_t_df(df)

    return metropolis_hastings(log_den, chain_size=N, thinning=thinning,
                               x_prev=np.random.randn(), step=0.5)

# estimate size of thinning
def get_thinning(X, nlags=50):
    autocorrelation = acf(X, nlags=nlags, fft=True)
    thinning = np.argmin(np.abs(autocorrelation - 0.95)) + 1
    return thinning, autocorrelation

# X = gen(TEST_CHAIN_SIZE, np.Inf)
# thinning, autocorr = get_thinning(X)
# print('thinning for AR normal simulation ', thinning, autocorr[thinning])
def __init__(self, custom_model, filepath, monitor='val_loss', verbose=0,
             save_best_only=False, save_weights_only=False,
             mode='auto', period=1):
    super(CustomModelCheckpoint, self).__init__()
    self.custom_model = custom_model
    self.monitor = monitor
    self.verbose = verbose
    self.filepath = filepath
    self.save_best_only = save_best_only
    self.save_weights_only = save_weights_only
    self.period = period
    self.epochs_since_last_save = 0

    if mode not in ['auto', 'min', 'max']:
        warnings.warn('CustomModelCheckpoint mode %s is unknown, '
                      'fallback to auto mode.' % (mode), RuntimeWarning)
        mode = 'auto'

    if mode == 'min':
        self.monitor_op = np.less
        self.best = np.Inf
    elif mode == 'max':
        self.monitor_op = np.greater
        self.best = -np.Inf
    else:
        if 'acc' in self.monitor or self.monitor.startswith('fmeasure'):
            self.monitor_op = np.greater
            self.best = -np.Inf
        else:
            self.monitor_op = np.less
            self.best = np.Inf
def test_axis(self):
    # Vector norms.
    # Compare the use of `axis` with computing the norm of each row
    # or column separately.
    A = array([[1, 2, 3], [4, 5, 6]], dtype=self.dt)
    for order in [None, -1, 0, 1, 2, 3, np.Inf, -np.Inf]:
        expected0 = [norm(A[:, k], ord=order) for k in range(A.shape[1])]
        assert_almost_equal(norm(A, ord=order, axis=0), expected0)
        expected1 = [norm(A[k, :], ord=order) for k in range(A.shape[0])]
        assert_almost_equal(norm(A, ord=order, axis=1), expected1)

    # Matrix norms.
    B = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4)
    nd = B.ndim
    for order in [None, -2, 2, -1, 1, np.Inf, -np.Inf, 'fro']:
        for axis in itertools.combinations(range(-nd, nd), 2):
            row_axis, col_axis = axis
            if row_axis < 0:
                row_axis += nd
            if col_axis < 0:
                col_axis += nd
            if row_axis == col_axis:
                assert_raises(ValueError, norm, B, ord=order, axis=axis)
            else:
                n = norm(B, ord=order, axis=axis)

                # The logic using k_index only works for nd = 3.
                # This has to be changed if nd is increased.
                k_index = nd - (row_axis + col_axis)
                if row_axis < col_axis:
                    expected = [norm(B[:].take(k, axis=k_index), ord=order)
                                for k in range(B.shape[k_index])]
                else:
                    expected = [norm(B[:].take(k, axis=k_index).T, ord=order)
                                for k in range(B.shape[k_index])]
                assert_almost_equal(n, expected)
def test_keepdims(self):
    A = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4)

    allclose_err = 'order {0}, axis = {1}'
    shape_err = 'Shape mismatch found {0}, expected {1}, order={2}, axis={3}'

    # check the order=None, axis=None case
    expected = norm(A, ord=None, axis=None)
    found = norm(A, ord=None, axis=None, keepdims=True)
    assert_allclose(np.squeeze(found), expected,
                    err_msg=allclose_err.format(None, None))
    expected_shape = (1, 1, 1)
    assert_(found.shape == expected_shape,
            shape_err.format(found.shape, expected_shape, None, None))

    # Vector norms.
    for order in [None, -1, 0, 1, 2, 3, np.Inf, -np.Inf]:
        for k in range(A.ndim):
            expected = norm(A, ord=order, axis=k)
            found = norm(A, ord=order, axis=k, keepdims=True)
            assert_allclose(np.squeeze(found), expected,
                            err_msg=allclose_err.format(order, k))
            expected_shape = list(A.shape)
            expected_shape[k] = 1
            expected_shape = tuple(expected_shape)
            assert_(found.shape == expected_shape,
                    shape_err.format(found.shape, expected_shape, order, k))

    # Matrix norms.
    for order in [None, -2, 2, -1, 1, np.Inf, -np.Inf, 'fro', 'nuc']:
        for k in itertools.permutations(range(A.ndim), 2):
            expected = norm(A, ord=order, axis=k)
            found = norm(A, ord=order, axis=k, keepdims=True)
            assert_allclose(np.squeeze(found), expected,
                            err_msg=allclose_err.format(order, k))
            expected_shape = list(A.shape)
            expected_shape[k[0]] = 1
            expected_shape[k[1]] = 1
            expected_shape = tuple(expected_shape)
            assert_(found.shape == expected_shape,
                    shape_err.format(found.shape, expected_shape, order, k))
def power_plot(data, sfreq, toffset, log_scale, zscale, title):
    """Plot the computed power of the iq data."""
    print("power")
    t_axis = numpy.arange(0, len(data)) / sfreq + toffset

    if log_scale:
        lrxpwr = 10 * numpy.log10(data + 1E-12)
    else:
        lrxpwr = data

    zscale_low, zscale_high = zscale

    if zscale_low == 0 and zscale_high == 0:
        if log_scale:
            zscale_low = numpy.min(
                lrxpwr[numpy.where(lrxpwr.real != -numpy.Inf)])
            zscale_high = numpy.max(lrxpwr) + 3.0
        else:
            zscale_low = numpy.min(lrxpwr)
            zscale_high = numpy.max(lrxpwr)

    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    ax.plot(t_axis, lrxpwr.real)
    ax.grid(True)
    ax.axis([toffset, t_axis[len(t_axis) - 1], zscale_low, zscale_high])
    ax.set_xlabel('time (seconds)')
    if log_scale:
        ax.set_ylabel('power (dB)')
    else:
        ax.set_ylabel('power')
    ax.set_title(title)
    return fig
def spectrum_plot(data, freq, cfreq, toffset, log_scale, zscale, title, clr):
    """Plot a spectrum from the data for a given fft bin size."""
    print("spectrum")
    tail_str = ''
    if log_scale:
        # pss = 10.0*numpy.log10(data / numpy.max(data))
        pss = 10.0 * numpy.log10(data + 1E-12)
        tail_str = ' (dB)'
    else:
        pss = data

    print freq
    freq_s = freq / 1.0E6 + cfreq / 1.0E6
    print freq_s
    zscale_low, zscale_high = zscale

    if zscale_low == 0 and zscale_high == 0:
        if log_scale:
            zscale_low = numpy.median(
                numpy.min(pss[numpy.where(pss.real != -numpy.Inf)])) - 3.0
            zscale_high = numpy.median(numpy.max(pss)) + 3.0
        else:
            zscale_low = numpy.median(numpy.min(pss))
            zscale_high = numpy.median(numpy.max(pss))

    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    ax.plot(freq_s, pss, clr)
    print freq_s[0], freq_s[-1], zscale_low, zscale_high
    ax.axis([freq_s[0], freq_s[-1], zscale_low, zscale_high])
    ax.grid(True)
    ax.set_xlabel('frequency (MHz)')
    ax.set_ylabel('power spectral density' + tail_str, fontsize=12)
    ax.set_title(title)
    return fig
def rti_plot(data, extent, tick_locs, tick_labels, log_scale, zscale, title):
    # set to log scaling
    if log_scale:
        RTId = 10.0 * numpy.log10(data)
    else:
        RTId = data

    zscale_low, zscale_high = zscale
    if zscale_low == 0 and zscale_high == 0:
        if log_scale:
            zscale_low = numpy.median(
                numpy.min(RTId[numpy.where(RTId.real != -numpy.Inf)])) - 3.0
            zscale_high = numpy.median(numpy.max(RTId)) + 10.0
        else:
            zscale_low = numpy.median(numpy.min(RTId))
            zscale_high = numpy.median(numpy.max(RTId))

    vmin = zscale_low
    vmax = zscale_high

    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    img = ax.imshow(RTId, origin='lower', extent=extent, interpolation='none',
                    vmin=vmin, vmax=vmax, aspect='auto')

    # plot dates
    ax.set_xticks(tick_locs)
    ax.set_xticklabels(tick_labels, rotation=-45, fontsize=10)
    cb = fig.colorbar(img, ax=ax)
    ax.set_xlabel('time (seconds)', fontsize=12)
    ax.set_ylabel('range (km)', fontsize=12)
    ax.set_title(title)
    return fig
def __init__(self, filepath, monitor='val_loss', verbose=0,
             save_best_only=False, save_weights_only=False,
             mode='auto'):
    super(ModelCheckpoint, self).__init__()
    self.monitor = monitor
    self.verbose = verbose
    self.filepath = filepath
    self.save_best_only = save_best_only
    self.save_weights_only = save_weights_only

    if mode not in ['auto', 'min', 'max']:
        warnings.warn('ModelCheckpoint mode %s is unknown, '
                      'fallback to auto mode.' % (mode), RuntimeWarning)
        mode = 'auto'

    if mode == 'min':
        self.monitor_op = np.less
        self.best = np.Inf
    elif mode == 'max':
        self.monitor_op = np.greater
        self.best = -np.Inf
    else:
        if 'acc' in self.monitor:
            self.monitor_op = np.greater
            self.best = -np.Inf
        else:
            self.monitor_op = np.less
            self.best = np.Inf
def on_train_begin(self, logs={}):
    self.wait = 0  # Allow instances to be re-used
    self.best = np.Inf if self.monitor_op == np.less else -np.Inf
def reset(self):
    if self.mode not in ['auto', 'min', 'max']:
        warnings.warn('Learning Rate Plateau Reducing mode %s is unknown, '
                      'fallback to auto mode.' % (self.mode), RuntimeWarning)
        self.mode = 'auto'
    if self.mode == 'min' or (self.mode == 'auto' and 'acc' not in self.monitor):
        self.monitor_op = lambda a, b: np.less(a, b - self.epsilon)
        self.best = np.Inf
    else:
        self.monitor_op = lambda a, b: np.greater(a, b + self.epsilon)
        self.best = -np.Inf
    self.cooldown_counter = 0
    self.wait = 0
    self.lr_epsilon = self.min_lr * 1e-4
def _get_bounds(self, ib, dimension):
    """ib == 0/1 means lower/upper bound, return a vector of length `dimension`"""
    sign_ = 2 * ib - 1
    assert sign_**2 == 1
    if self.bounds is None or self.bounds[ib] is None:
        return array(dimension * [sign_ * np.Inf])
    res = []
    for i in range(dimension):
        res.append(self.bounds[ib][min([i, len(self.bounds[ib]) - 1])])
        if res[-1] is None:
            res[-1] = sign_ * np.Inf
    return array(res)
def __init__(self, asedb, kvp={}, data={}, batch_size=1,
             selection=None, shuffle=True, prefetch=False,
             block_size=150000, capacity=5000, num_epochs=np.Inf,
             floatX=np.float32):
    super(ASEDataProvider, self).__init__(batch_size)
    self.asedb = asedb
    self.prefetch = prefetch
    self.selection = selection
    self.block_size = block_size
    self.shuffle = shuffle
    self.kvp = kvp
    self.data = data
    self.floatX = floatX
    self.feat_names = ['numbers', 'positions', 'cell', 'pbc'] + \
        list(kvp.keys()) + list(data.keys())
    self.shapes = [(None,), (None, 3), (3, 3), (3,)] + \
        list(kvp.values()) + list(data.values())

    self.epoch = 0
    self.num_epochs = num_epochs
    self.n_rows = 0

    # initialize queue
    with connect(self.asedb) as con:
        row = list(con.select(self.selection, limit=1))[0]
    feats = self.convert_atoms(row)
    dtypes = [np.array(feat).dtype for feat in feats]
    self.queue = tf.FIFOQueue(capacity, dtypes)
    self.placeholders = [
        tf.placeholder(dt, name=name)
        for dt, name in zip(dtypes, self.feat_names)
    ]
    self.enqueue_op = self.queue.enqueue(self.placeholders)
    self.dequeue_op = self.queue.dequeue()

    self.preprocs = []
def read(self):
    retval = self.func()
    if isinstance(retval, numbers.Number) and retval != np.Inf:
        self.value.setText('%s' % (self.applySIPrefix(retval, self.units)))
    else:
        self.value.setText(str(retval))
def read(self):
    retval = self.func()
    try:
        if isinstance(retval, numbers.Number) and retval != np.Inf:
            self.value.setText('%s' % (self.applySIPrefix(retval, self.units)))
        else:
            self.value.setText(retval)
    except:
        self.value.setText(str(retval))
def read(self):
    retval = self.func(self.optionBox.currentText())
    # if abs(retval) < 1e4 and abs(retval) > .01:
    #     self.value.setText('%.3f %s ' % (retval, self.units))
    # else:
    #     self.value.setText('%.3e %s ' % (retval, self.units))
    if isinstance(retval, numbers.Number) and retval != np.Inf:
        self.value.setText('%s' % (self.applySIPrefix(retval, self.units)))
    else:
        self.value.setText(str(retval))
    if self.linkFunc:
        self.linkFunc(retval)
def _safe_db(num, den):
    """Properly handle the potential +Inf db SIR, instead of raising a
    RuntimeWarning. Only denominator is checked because the numerator
    can never be 0.
    """
    if den == 0:
        return np.Inf
    return 10 * np.log10(num / den)