We extracted the following 50 code examples from open-source Python projects to illustrate how to use numpy.inf.
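Before the project snippets, here is a minimal standalone sketch (our own illustration, not taken from any of the projects below) of the properties of numpy.inf that these examples rely on: it is an ordinary Python float that compares larger than any finite number, propagates through arithmetic, and doubles as the ord argument for the infinity norm.

import numpy as np

# np.inf is a plain float constant, not a callable
assert np.inf > 1e308             # larger than any finite float
assert np.inf + 1 == np.inf       # propagates through arithmetic

a = np.array([1.0, np.inf, -np.inf, np.nan])
print(np.isinf(a))                # [False  True  True False]
print(np.isfinite(a))             # [ True False False False]

# infinity norm: the maximum absolute entry
v = np.array([3.0, -7.0, 2.0])
print(np.linalg.norm(v, np.inf))  # 7.0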
def plot_eigenvectors(Vh, U, mytitle, which=[0, 1, 2, 5, 10, 15]):
    assert len(which) % 3 == 0
    nrows = len(which) // 3
    subplot_loc = nrows * 100 + 30
    plt.figure(figsize=(18, 4 * nrows))
    title_stamp = mytitle + " {0}"
    u = dl.Function(Vh)
    counter = 1
    for i in which:
        assert i < U.shape[1]
        Ui = U[:, i]
        if Ui[0] >= 0:
            s = 1. / np.linalg.norm(Ui, np.inf)
        else:
            s = -1. / np.linalg.norm(Ui, np.inf)
        u.vector().set_local(s * Ui)
        plot(u, subplot_loc=(subplot_loc + counter),
             mytitle=title_stamp.format(i), vmin=-1, vmax=1)
        counter = counter + 1
def exportU(self, Vh, fname, varname="evect", normalize=1):
    """
    Export the generalized eigenvectors U for Paraview.

    Inputs:
    - Vh:        the parameter finite element space
    - fname:     the name of the Paraview output file
    - varname:   the name of the Paraview variable
    - normalize: if True, the eigenvectors are rescaled so that || u ||_inf = 1
    """
    evect = Function(Vh, name=varname)
    fid = File(fname)
    for i in range(0, self.U.shape[1]):
        Ui = self.U[:, i]
        if normalize:
            s = 1 / np.linalg.norm(Ui, np.inf)
            evect.vector().set_local(s * Ui)
        else:
            evect.vector().set_local(Ui)
        fid << evect
def __init__(self, get_params_function, try_params_function):
    self.get_params = get_params_function
    self.try_params = try_params_function

    self.max_iter = 81  # maximum iterations per configuration
    self.eta = 3        # defines configuration downsampling rate (default = 3)

    self.logeta = lambda x: log(x) / log(self.eta)
    self.s_max = int(self.logeta(self.max_iter))
    self.B = (self.s_max + 1) * self.max_iter

    self.results = []   # list of dicts
    self.counter = 0
    self.best_loss = np.inf
    self.best_counter = -1

# can be called multiple times
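Initializing best_loss to np.inf above is the usual sentinel for "no incumbent yet": the first finite loss always wins the comparison. A trivial standalone sketch of that pattern:

import numpy as np

best_loss = np.inf
for loss in [0.9, 0.4, 0.6]:
    if loss < best_loss:  # any finite loss beats the np.inf sentinel
        best_loss = loss
print(best_loss)  # 0.4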
def load_data(infile, chroms, resolutions):
    starts = infile['starts'][...]
    chromosomes = infile['chromosomes'][...]
    data = {}
    for res in resolutions:
        data[res] = {}
        for i, chrom in enumerate(chromosomes):
            if chrom not in chroms:
                continue
            start = (starts[i] // res) * res
            dist = infile['dist.%s.%i' % (chrom, res)][...]
            valid_rows = infile['valid.%s.%i' % (chrom, res)][...]
            corr = infile['corr.%s.%i' % (chrom, res)][...]
            N, M = corr.shape
            valid = numpy.zeros((N, M), dtype=numpy.int32)
            for j in range(min(N - 1, M)):
                P = N - j - 1
                valid[:P, j] = valid_rows[(j + 1):] * valid_rows[:P]
            temp = corr * dist
            valid[numpy.where(numpy.abs(temp) == numpy.inf)] = False
            data[res][chrom] = [start, temp, valid]
    return data
def monotoneTFosc(f):
    """Maps [-inf, inf] to [-inf, inf] with different constants
    for the positive and negative parts.
    """
    if np.isscalar(f):
        if f > 0.:
            f = np.log(f) / 0.1
            f = np.exp(f + 0.49 * (np.sin(f) + np.sin(0.79 * f))) ** 0.1
        elif f < 0.:
            f = np.log(-f) / 0.1
            f = -np.exp(f + 0.49 * (np.sin(0.55 * f) + np.sin(0.31 * f))) ** 0.1
        return f
    else:
        f = np.asarray(f)
        g = f.copy()
        idx = (f > 0)
        g[idx] = np.log(f[idx]) / 0.1
        g[idx] = np.exp(g[idx] + 0.49 * (np.sin(g[idx]) + np.sin(0.79 * g[idx]))) ** 0.1
        idx = (f < 0)
        g[idx] = np.log(-f[idx]) / 0.1
        g[idx] = -np.exp(g[idx] + 0.49 * (np.sin(0.55 * g[idx]) + np.sin(0.31 * g[idx]))) ** 0.1
        return g
def get_float_parameter(self, param_id):
    return self.RAPI_rc(vrep.simxGetFloatingParameter(
        self.cID, param_id, vrep.simx_opmode_blocking))[0]

# openai/gym

# Set this in SOME subclasses
# metadata = {'render.modes': []}
# reward_range = (-np.inf, np.inf)

# Override in SOME subclasses
# def _close(self): pass

# Set these in ALL subclasses
# action_space = None
# observation_space = None

# Override in ALL subclasses
# def _step(self, action): raise NotImplementedError
# def _reset(self): raise NotImplementedError
# def _render(self, mode='human', close=False): return
# def _seed(self, seed=None): return []
def _solveRelativeDG(self, points):
    """
    Solves the norm-constrained version of the problem:

        min  sum z_q
        st   z_q >= c'x_q - 1
             z_q >= 1 - c'x_q
             A'y = c
             b'y = 1
             ||c|| = 1
             y >= 0
    """
    if self.normalize_c == 1:
        error = self._solveRelativeDGNorm1(points)
    elif self.normalize_c == np.inf:
        error = self._solveRelativeDGNormInf(points)
    return error
def _solveFeasibleProjection(self, points):
    m, n = self.A.shape
    bestResult = np.inf

    for i in range(m):
        if i in self.ban_constraints:
            result = np.inf
        else:
            ai = self.A[i]
            bi = self.b[i]
            result = self._project_to_hyperplane(points, ai, bi)

        if result < bestResult:
            bestResult = result
            self.dual = np.zeros(m)
            self.dual[i] = 1.0 / np.linalg.norm(ai, np.inf)
            self.c = ai / np.linalg.norm(ai, np.inf)

    self._solved = True
    # self.dual = self.dual.T.tolist()[0]
    self.c = self.c.tolist()[0]
    self.error = bestResult
    return result
def _initialize_kwargs(self, kwargs):
    # common kwargs
    if 'verbose' in kwargs:
        assert isinstance(kwargs['verbose'], bool), \
            'verbose needs to be True or False.'
        self._verbose = kwargs['verbose']
    if 'tol' in kwargs:
        assert isinstance(kwargs['tol'], int), \
            'tolerance needs to be an integer.'
        self.tol = kwargs['tol']

    # class-specific kwargs
    if 'p' in kwargs:
        assert isinstance(kwargs['p'], int) or kwargs['p'] == 'inf', \
            "p needs to be an integer or 'inf'"
        self.p = kwargs['p']

    return kwargs
def _build_graph(self, image_size):
    self.image_size = image_size
    self.images = tf.placeholder(tf.float32,
                                 shape=(None, image_size, image_size, 3))
    images_mini = tf.image.resize_images(
        self.images, size=(int(image_size / 4), int(image_size / 4)))
    self.images_blur = tf.image.resize_images(
        images_mini, size=(image_size, image_size))

    self.net = U_Net(output_ch=3, block_fn='origin')
    self.images_reconst = self.net(self.images_blur, reuse=False)
    # self.images_reconst can lie in [-inf, +inf], so its values must be
    # clipped before visualizing them as images.
    self.loss = tf.reduce_mean((self.images_reconst - self.images) ** 2)
    self.opt = tf.train.AdamOptimizer() \
        .minimize(self.loss, var_list=self.net.vars)

    self.saver = tf.train.Saver()
    self.sess.run(tf.global_variables_initializer())
def test_pitch_estimation(self):
    """
    Test the pitch estimation algorithm on a contrived small example.
    If the estimated pitch is within 5 Hz, call it good (for this small
    example, since the algorithm wasn't made for this type of
    synthesized signal).
    """
    cfg = ExperimentConfig(pitch_strength_thresh=-np.inf)
    # the next 3 variables are in Hz
    tolerance = 5
    fs = 48000
    f = 150
    # create a sine wave of f Hz sampled at fs Hz
    x = np.sin(2 * np.pi * f / fs * np.arange(2 ** 10))
    # estimate the pitch; it should be close to f
    p, t, s = pest.pitch_estimation(x, fs, cfg)
    self.assertTrue(np.all(np.abs(p - f) < tolerance))
def test_PlotCurveItem():
    p = pg.GraphicsWindow()
    p.ci.layout.setContentsMargins(4, 4, 4, 4)  # default margins vary by platform
    v = p.addViewBox()
    p.resize(200, 150)
    data = np.array([1, 4, 2, 3, np.inf, 5, 7, 6, -np.inf, 8, 10, 9, np.nan, -1, -2, 0])
    c = pg.PlotCurveItem(data)
    v.addItem(c)
    v.autoRange()

    # Check auto-range works. Some platform differences may be expected..
    checkRange = np.array([[-1.1457564053237301, 16.145756405323731],
                           [-3.076811473165955, 11.076811473165955]])
    assert np.allclose(v.viewRange(), checkRange)

    assertImageApproved(p, 'plotcurveitem/connectall',
                        "Plot curve with all points connected.")

    c.setData(data, connect='pairs')
    assertImageApproved(p, 'plotcurveitem/connectpairs',
                        "Plot curve with pairs connected.")

    c.setData(data, connect='finite')
    assertImageApproved(p, 'plotcurveitem/connectfinite',
                        "Plot curve with finite points connected.")

    c.setData(data, connect=np.array([1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0]))
    assertImageApproved(p, 'plotcurveitem/connectarray',
                        "Plot curve with connection array.")
def test_rescaleData():
    # materialize the map so the nested loops can iterate over it repeatedly
    dtypes = list(map(np.dtype, ('ubyte', 'uint16', 'byte', 'int16', 'int', 'float')))
    for dtype1 in dtypes:
        for dtype2 in dtypes:
            data = (np.random.random(size=10) * 2**32 - 2**31).astype(dtype1)
            for scale, offset in [(10, 0), (10., 0.), (1, -50), (0.2, 0.5), (0.001, 0)]:
                if dtype2.kind in 'iu':
                    lim = np.iinfo(dtype2)
                    lim = lim.min, lim.max
                else:
                    lim = (-np.inf, np.inf)
                s1 = np.clip(float(scale) * (data - float(offset)), *lim).astype(dtype2)
                s2 = pg.rescaleData(data, scale, offset, dtype2)
                assert s1.dtype == s2.dtype
                if dtype2.kind in 'iu':
                    assert np.all(s1 == s2)
                else:
                    assert np.allclose(s1, s2)
def time_slice(self, t_start, t_stop):
    '''
    Creates a new :class:`Event` corresponding to the time slice of
    the original :class:`Event` between (and including) times
    :attr:`t_start` and :attr:`t_stop`. Either parameter can also be
    None to use infinite endpoints for the time interval.
    '''
    _t_start = t_start
    _t_stop = t_stop
    if t_start is None:
        _t_start = -np.inf
    if t_stop is None:
        _t_stop = np.inf

    indices = (self >= _t_start) & (self <= _t_stop)
    new_evt = self[indices]
    return new_evt
def time_slice(self, t_start, t_stop):
    '''
    Creates a new :class:`Epoch` corresponding to the time slice of
    the original :class:`Epoch` between (and including) times
    :attr:`t_start` and :attr:`t_stop`. Either parameter can also be
    None to use infinite endpoints for the time interval.
    '''
    _t_start = t_start
    _t_stop = t_stop
    if t_start is None:
        _t_start = -np.inf
    if t_stop is None:
        _t_stop = np.inf

    indices = (self >= _t_start) & (self <= _t_stop)
    new_epc = self[indices]
    new_epc.durations = self.durations[indices]
    new_epc.labels = self.labels[indices]
    return new_epc
def time_slice(self, t_start, t_stop):
    '''
    Creates a new :class:`SpikeTrain` corresponding to the time slice of
    the original :class:`SpikeTrain` between (and including) times
    :attr:`t_start` and :attr:`t_stop`. Either parameter can also be
    None to use infinite endpoints for the time interval.
    '''
    _t_start = t_start
    _t_stop = t_stop
    if t_start is None:
        _t_start = -np.inf
    if t_stop is None:
        _t_stop = np.inf

    indices = (self >= _t_start) & (self <= _t_stop)
    new_st = self[indices]
    new_st.t_start = max(_t_start, self.t_start)
    new_st.t_stop = min(_t_stop, self.t_stop)
    if self.waveforms is not None:
        new_st.waveforms = self.waveforms[indices]
    return new_st
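The three time_slice variants above share one trick: None endpoints are replaced with -np.inf / np.inf so that a single vectorized comparison handles open-ended intervals. A minimal sketch of the same pattern in plain NumPy (a hypothetical helper, not the neo classes):

import numpy as np

def slice_times(times, t_start=None, t_stop=None):
    # open-ended bounds become infinite, so one comparison covers all cases
    _t_start = -np.inf if t_start is None else t_start
    _t_stop = np.inf if t_stop is None else t_stop
    return times[(times >= _t_start) & (times <= _t_stop)]

t = np.array([0.5, 1.5, 2.5, 3.5])
print(slice_times(t, 1.0, 3.0))   # [1.5 2.5]
print(slice_times(t, None, 2.0))  # [0.5 1.5]
print(slice_times(t))             # all four entries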
def __call__(self, params):
    print('???', params)
    sd1 = params[0]
    sd2 = params[1]
    cor = params[2]
    if sd1 < 0. or sd1 > 10. or sd2 < 0. or sd2 > 10. or cor < -1. or cor > 1.:
        return np.inf
    bandwidth = maths.stats.choleskysqrt2d(sd1, sd2, cor)
    bandwidthdet = la.det(bandwidth)
    bandwidthinv = la.inv(bandwidth)
    diff = sample[self.__iidx] - sample[self.__jidx]
    temp = diff.dot(bandwidthinv.T)
    temp *= temp
    e = np.exp(np.sum(temp, axis=1))
    s = np.sum(e**(-.25) - 4 * e**(-.5))
    cost = self.__n / bandwidthdet + (2. / bandwidthdet) * s
    print('!!!', cost)
    return cost / 10000.
def test_ecdf_formal_custom():
    assert dcst.ecdf_formal(0.1, [0, 1, 2, 3]) == 0.25
    assert dcst.ecdf_formal(-0.1, [0, 1, 2, 3]) == 0.0
    assert dcst.ecdf_formal(0.1, [3, 2, 0, 1]) == 0.25
    assert dcst.ecdf_formal(-0.1, [3, 2, 0, 1]) == 0.0
    assert dcst.ecdf_formal(2, [3, 2, 0, 1]) == 0.75
    assert dcst.ecdf_formal(1, [3, 2, 0, 1]) == 0.5
    assert dcst.ecdf_formal(3, [3, 2, 0, 1]) == 1.0
    assert dcst.ecdf_formal(0, [3, 2, 0, 1]) == 0.25

    with pytest.raises(RuntimeError) as excinfo:
        dcst.ecdf_formal([np.nan, np.inf], [0, 1, 2, 3])
    excinfo.match('Input cannot have NaNs.')

    correct = np.array([1.0, 1.0])
    result = dcst.ecdf_formal([3.1, np.inf], [3, 2, 0, 1])
    assert np.allclose(correct, result, atol=atol)
def test_draw_bs_pairs_linreg_nan():
    x = np.array([])
    y = np.array([])
    with pytest.raises(RuntimeError) as excinfo:
        dcst.draw_bs_pairs_linreg(x, y, size=1)
    excinfo.match('Arrays must have at least 2 mutual non-NaN entries.')

    x = np.array([np.nan])
    y = np.array([np.nan])
    with pytest.raises(RuntimeError) as excinfo:
        dcst.draw_bs_pairs_linreg(x, y, size=1)
    excinfo.match('Arrays must have at least 2 mutual non-NaN entries.')

    x = np.array([np.nan, 1])
    y = np.array([1, np.nan])
    with pytest.raises(RuntimeError) as excinfo:
        dcst.draw_bs_pairs_linreg(x, y, size=1)
    excinfo.match('Arrays must have at least 2 mutual non-NaN entries.')

    x = np.array([0, 1, 5])
    y = np.array([1, np.inf, 3])
    with pytest.raises(RuntimeError) as excinfo:
        dcst.draw_bs_pairs_linreg(x, y, size=1)
    excinfo.match('All entries in arrays must be finite.')
def test_pearson_r_edge():
    x = np.array([])
    y = np.array([])
    with pytest.raises(RuntimeError) as excinfo:
        dcst.pearson_r(x, y)
    excinfo.match('Arrays must have at least 2 mutual non-NaN entries.')

    x = np.array([np.nan])
    y = np.array([np.nan])
    with pytest.raises(RuntimeError) as excinfo:
        dcst.pearson_r(x, y)
    excinfo.match('Arrays must have at least 2 mutual non-NaN entries.')

    x = np.array([np.nan, 1])
    y = np.array([1, np.nan])
    with pytest.raises(RuntimeError) as excinfo:
        dcst.pearson_r(x, y)
    excinfo.match('Arrays must have at least 2 mutual non-NaN entries.')

    x = np.array([0, 1, 5])
    y = np.array([1, np.inf, 3])
    with pytest.raises(RuntimeError) as excinfo:
        dcst.pearson_r(x, y)
    excinfo.match('All entries in arrays must be finite.')
def make_3d_mask(img_shape, center, radius, shape='sphere'):
    mask = np.zeros(img_shape)
    radius = np.rint(radius)
    center = np.rint(center)
    sz = np.arange(int(max(center[0] - radius, 0)),
                   int(max(min(center[0] + radius + 1, img_shape[0]), 0)))
    sy = np.arange(int(max(center[1] - radius, 0)),
                   int(max(min(center[1] + radius + 1, img_shape[1]), 0)))
    sx = np.arange(int(max(center[2] - radius, 0)),
                   int(max(min(center[2] + radius + 1, img_shape[2]), 0)))
    sz, sy, sx = np.meshgrid(sz, sy, sx)
    if shape == 'cube':
        mask[sz, sy, sx] = 1.
    elif shape == 'sphere':
        distance2 = ((center[0] - sz) ** 2 + (center[1] - sy) ** 2 +
                     (center[2] - sx) ** 2)
        distance_matrix = np.ones_like(mask) * np.inf
        distance_matrix[sz, sy, sx] = distance2
        mask[(distance_matrix <= radius ** 2)] = 1
    elif shape == 'gauss':
        z, y, x = np.ogrid[:mask.shape[0], :mask.shape[1], :mask.shape[2]]
        distance = ((z - center[0]) ** 2 + (y - center[1]) ** 2 +
                    (x - center[2]) ** 2)
        mask = np.exp(-1. * distance / (2 * radius ** 2))
        mask[(distance > 3 * radius ** 2)] = 0
    return mask
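The np.ones_like(mask) * np.inf initialization above restricts the threshold test to a sub-region: voxels outside the index window keep an infinite distance, so distance_matrix <= radius ** 2 can never select them. A tiny 2D sketch of the same idea (our own illustration, not from the project):

import numpy as np

mask = np.zeros((5, 5))
dist = np.full_like(mask, np.inf)            # everything starts infinitely far
dist[1:4, 1:4] = np.arange(9).reshape(3, 3)  # real distances only in a window
mask[dist <= 4] = 1                          # cells outside the window never pass
print(mask)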
def __init__(self, datadir, target_size=108, image_size=64,
             split=5, num_utilize=np.inf):
    self.datadir = datadir
    self.target_size = target_size
    self.image_size = image_size
    self.split = split

    self.image_paths = []
    for d in self.datadir:
        self.image_paths += glob(d + '/*.jpg')
    self.data_size = min(len(self.image_paths), num_utilize)
    print('data size : {}'.format(self.data_size))
    self.image_paths = np.random.choice(self.image_paths,
                                        self.data_size, replace=False)
    self.data = None
def SLcomputePSNR(X, Xnoisy):
    """
    SLcomputePSNR Compute peak signal to noise ratio (PSNR).

    Usage:
        PSNR = SLcomputePSNR(X, Xnoisy)

    Input:
        X:      2D or 3D signal.
        Xnoisy: 2D or 3D noisy signal.

    Output:
        PSNR: The peak signal to noise ratio (in dB).
    """
    MSEsqrt = np.linalg.norm(X - Xnoisy) / np.sqrt(X.size)
    if MSEsqrt == 0:
        return np.inf
    else:
        return 20 * np.log10(255 / MSEsqrt)
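A quick usage note on the helper above: identical inputs give zero MSE, which is why the zero check returns np.inf instead of dividing by zero. A hedged usage sketch (the expected values below are our own arithmetic):

import numpy as np

X = np.zeros((4, 4))
print(SLcomputePSNR(X, X))   # inf: zero residual
Xn = X.copy()
Xn[0, 0] = 1.0               # one differing pixel, MSEsqrt = 0.25
print(SLcomputePSNR(X, Xn))  # 20*log10(255/0.25), about 60.2 dB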
def decoding(self, src_encodings):
    src_len = len(src_encodings)

    # NOTE: should transpose before calling the `mst` method!
    s_arc, s_label = self.cal_scores(src_encodings)
    s_arc_values = s_arc.npvalue().transpose()  # src_len, src_len
    s_label_values = np.asarray(
        [x.npvalue() for x in s_label]).transpose((2, 1, 0))  # src_len, src_len, n_labels

    # weights = np.zeros((src_len + 1, src_len + 1))
    # weights[0, 1:(src_len + 1)] = np.inf
    # weights[1:(src_len + 1), 0] = np.inf
    # weights[1:(src_len + 1), 1:(src_len + 1)] = s_arc_values[batch]
    weights = s_arc_values

    pred_heads = mst(weights)
    pred_labels = [np.argmax(labels[head])
                   for head, labels in zip(pred_heads, s_label_values)]
    return pred_heads, pred_labels
def __init__(self, image, freq, pixelsize, ra0, dec0,
             minvalue=1e-4, maxvalue=np.inf, mask=None, projection="CAR"):
    self.image = image          # [K] (brightness temperature)
    self.freq = freq            # [MHz]
    self.pixelsize = pixelsize  # [arcsec]
    self.ra0 = ra0              # [deg]
    self.dec0 = dec0            # [deg]
    self.minvalue = minvalue
    self.maxvalue = maxvalue
    self.mask = mask
    self.projection = projection
    logger.info("SkyModel: Loaded image @ %.2f [MHz], " % freq +
                "%.1f [arcsec/pixel]" % pixelsize)
    logger.info("Image size: %dx%d" % self.shape)
    logger.info("FoV size: %.2fx%.2f [deg^2]" % self.fov)
def make_data_frame(words, years, feature_dict):
    """
    Makes a pandas DataFrame for words, years, and a dictionary of feature funcs.
    Each feature func should take (word, year) and return a feature value.
    The constructed DataFrame has a flat csv-style structure, and rows with
    missing values are removed.
    """
    temp = collections.defaultdict(list)
    feature_dict["word"] = lambda word, year: word
    feature_dict["year"] = lambda word, year: year
    for word in words:
        for year in years:
            for feature, feature_func in feature_dict.items():
                temp[feature].append(feature_func(word, year))
    df = pd.DataFrame(temp)
    df = df.replace([np.inf, -np.inf], np.nan)
    df = df.dropna()
    return df
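The replace([np.inf, -np.inf], np.nan) followed by dropna() above is the idiomatic pandas recipe for discarding non-finite rows, since dropna alone does not treat infinities as missing. A small standalone demo:

import numpy as np
import pandas as pd

df = pd.DataFrame({'x': [1.0, np.inf, -np.inf, np.nan, 5.0]})
clean = df.replace([np.inf, -np.inf], np.nan).dropna()
print(clean)  # only the 1.0 and 5.0 rows survive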
def run_test_episode(env, policy, episode_len=np.inf, render=False):
    """
    Run an episode and return the total reward.
    """
    episode_itr = 0
    total_reward = 0.0
    done = False
    obs = env.reset()
    while not done and episode_itr < episode_len:
        if render:
            env.render()
        obs = apply_prediction_preprocessors(policy, obs)
        action = policy.predict(obs)
        action = apply_prediction_postprocessors(policy, action)
        obs, reward, done, _ = env.step(action)
        total_reward += reward
        episode_itr += 1
    return total_reward
def logpdf(self, samples):
    '''
    Calculates the log of the probability density function.

    Parameters
    ----------
    samples : array_like
        n-by-2 matrix of samples where n is the number of samples.

    Returns
    -------
    vals : ndarray
        Log of the probability density function evaluated at `samples`.
    '''
    samples = np.copy(np.asarray(samples))
    samples = self.__rotate_input(samples)
    inner = np.all(np.bitwise_and(samples > 0.0, samples < 1.0), axis=1)
    outer = np.invert(inner)
    vals = np.zeros(samples.shape[0])
    vals[inner] = self._logpdf(samples[inner, :])
    # Assign zero mass to the border
    vals[outer] = -np.inf
    return vals
def _logcdf(self, samples):
    lower = np.full(2, -np.inf)
    upper = norm.ppf(samples)
    limit_flags = np.zeros(2)
    if upper.shape[0] > 0:

        def func1d(upper1d):
            '''
            Calculates the multivariate normal cumulative distribution
            function of a single sample.
            '''
            return mvn.mvndst(lower, upper1d, limit_flags, self.theta)[1]

        vals = np.apply_along_axis(func1d, -1, upper)
    else:
        vals = np.empty((0, ))
    old_settings = np.seterr(divide='ignore')
    vals = np.log(vals)
    np.seterr(**old_settings)
    vals[np.any(samples == 0.0, axis=1)] = -np.inf
    vals[samples[:, 0] == 1.0] = np.log(samples[samples[:, 0] == 1.0, 1])
    vals[samples[:, 1] == 1.0] = np.log(samples[samples[:, 1] == 1.0, 0])
    return vals
def munch(self, data, panel_params):
    ranges = self.range(panel_params)

    data.loc[data['x'] == -np.inf, 'x'] = ranges.x[0]
    data.loc[data['x'] == np.inf, 'x'] = ranges.x[1]
    data.loc[data['y'] == -np.inf, 'y'] = ranges.y[0]
    data.loc[data['y'] == np.inf, 'y'] = ranges.y[1]

    dist = self.distance(data['x'], data['y'], panel_params)
    bool_idx = data['group'].iloc[1:].values != \
        data['group'].iloc[:-1].values
    dist[bool_idx] = np.nan

    # Munch
    munched = munch_data(data, dist)
    return munched
def test_remove_missing():
    df = pd.DataFrame({'a': [1.0, np.NaN, 3, np.inf],
                       'b': [1, 2, 3, 4]})
    df2 = pd.DataFrame({'a': [1.0, 3, np.inf],
                        'b': [1, 3, 4]})
    df3 = pd.DataFrame({'a': [1.0, 3],
                        'b': [1, 3]})

    with warnings.catch_warnings(record=True) as w:
        res = remove_missing(df, na_rm=True, vars=['b'])
        res.equals(df)

        res = remove_missing(df)
        res.equals(df2)

        res = remove_missing(df, na_rm=True, finite=True)
        res.equals(df3)
        assert len(w) == 1
def test_removes_infinite_values():
    df = mtcars.copy()
    df.loc[[0, 5], 'wt'] = [np.inf, -np.inf]
    p = ggplot(df, aes(x='wt')) + geom_bar()

    with pytest.warns(UserWarning) as record:
        p._build()

    def removed_2_row_with_infinites(record):
        for item in record:
            msg = str(item.message).lower()
            if '2 rows' in msg and 'non-finite' in msg:
                return True
        return False

    assert removed_2_row_with_infinites(record)
def process(self, **kwargs):
    """Process module."""
    self._times = kwargs[self.key('dense_times')]
    self._alpha = kwargs[self.key('alpha')]
    self._beta = kwargs[self.key('beta')]
    self._t_peak = kwargs[self.key('tpeak')]
    self._lum_scale = kwargs[self.key('lumscale')]
    self._rest_t_explosion = kwargs[self.key('resttexplosion')]

    ts = [
        np.inf
        if self._rest_t_explosion > x else (x - self._rest_t_explosion)
        for x in self._times
    ]
    luminosities = [
        self._lum_scale * (1.0 - np.exp(-t / self._t_peak)) **
        self._alpha * (t / self._t_peak) ** (-self._beta) for t in ts
    ]
    luminosities = [0.0 if isnan(x) else x for x in luminosities]

    return {self.dense_key('luminosities'): luminosities}
def process(self, **kwargs):
    """Process module."""
    self._times = kwargs[self.key('dense_times')]
    self._mnickel = kwargs[self.key('fnickel')] * kwargs[self.key('mejecta')]
    self._rest_t_explosion = kwargs[self.key('resttexplosion')]

    # From 1994ApJS...92..527N
    ts = [
        np.inf
        if self._rest_t_explosion > x else (x - self._rest_t_explosion)
        for x in self._times
    ]
    luminosities = [
        self._mnickel * (self.NI56_LUM * np.exp(-t / self.NI56_LIFE) +
                         self.CO56_LUM * np.exp(-t / self.CO56_LIFE))
        for t in ts
    ]
    luminosities = [0.0 if isnan(x) else x for x in luminosities]

    return {self.dense_key('luminosities'): luminosities}
def pick_n_hidden(data, repeat=1, verbose=False, **kwargs):
    """A helper function to pick the number of hidden factors / clusters to use."""
    # TODO: Use an efficient search strategy
    max_score = -np.inf
    n = 1
    all_scores = []
    while True:
        scores = []
        for _ in range(repeat):
            out = Corex(n_hidden=n, gpu=False, **kwargs).fit(data)
            m = out.moments
            scores.append(m["TC_no_overlap"])
        score = max(scores)
        if verbose:
            print("n: {}, score: {}".format(n, score))
        all_scores.append((score, n))
        if score < 0.95 * max_score:
            break
        else:
            n += 1
        if score > max_score:
            max_score = score
    return all_scores
def help_generate_np_gives_adversarial_example(self, ord):
    x_val = np.random.rand(100, 2)
    x_val = np.array(x_val, dtype=np.float32)

    x_adv = self.attack.generate_np(x_val, eps=.5, ord=ord,
                                    clip_min=-5, clip_max=5)
    if ord == np.inf:
        delta = np.max(np.abs(x_adv - x_val), axis=1)
    elif ord == 1:
        delta = np.sum(np.abs(x_adv - x_val), axis=1)
    elif ord == 2:
        delta = np.sum(np.square(x_adv - x_val), axis=1) ** .5
    self.assertClose(delta, 0.5)

    orig_labs = np.argmax(self.sess.run(self.model(x_val)), axis=1)
    new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)
    self.assertTrue(np.mean(orig_labs == new_labs) < 0.5)
def test_attack_strength(self):
    """
    If clipping is not done at each iteration (i.e., if clip_min and
    clip_max are not passed to fgm), this attack fails with
    np.mean(orig_labels == new_labels) == .39.
    """
    x_val = np.random.rand(100, 2)
    x_val = np.array(x_val, dtype=np.float32)

    x_adv = self.attack.generate_np(x_val, eps=1.0, ord=np.inf,
                                    clip_min=0.5, clip_max=0.7,
                                    nb_iter=5)

    orig_labs = np.argmax(self.sess.run(self.model(x_val)), axis=1)
    new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)
    self.assertTrue(np.mean(orig_labs == new_labs) < 0.1)
def ExpM(self):
    """
    Approximate a signal via element-wise exponentiation. As appears in:
    S.I. Mimilakis, K. Drossos, T. Virtanen, and G. Schuller,
    "Deep Neural Networks for Dynamic Range Compression in Mastering
    Applications," in proc. of the 140th Audio Engineering Society
    Convention, Paris, 2016.

    Args:
        sTarget:   (2D ndarray) Magnitude Spectrogram of the target component
        nResidual: (2D ndarray) Magnitude Spectrogram of the residual component

    Returns:
        mask: (2D ndarray) Array that contains time-frequency gain values
    """
    print('Exponential mask')
    self._mask = np.divide(
        np.log(self._sTarget.clip(self._eps, np.inf) ** self._alpha),
        np.log(self._nResidual.clip(self._eps, np.inf) ** self._alpha))
def prior_contribution_phylogeny_parameters(self, state):
    """
    Evaluate the prior probability of the phylogeny mean/std.
    Log scale.
    """
    mean_prior = scipy.stats.norm.logpdf(
        state.phylogeny_mean,
        loc=self.phylogeny_lambda_l,
        scale=np.sqrt(self.phylogeny_mean_hyperprior_variance)
    )
    if (0. <= state.phylogeny_std and
            state.phylogeny_std <= self.phylogeny_std_upper_bound):
        std_prior = -1.0 * np.log(self.phylogeny_std_upper_bound)
    else:
        std_prior = -np.inf
    return mean_prior + std_prior
def getError(self):
    # Test function
    fx = lambda x, y: np.sin(2*np.pi*x)
    fy = lambda x, y: np.sin(2*np.pi*y)
    sol = lambda x, y: 2*np.pi*(np.cos(2*np.pi*x) + np.cos(2*np.pi*y))

    Fc = cartF2(self.M, fx, fy)
    F = self.M.projectFaceVector(Fc)

    divF = self.M.faceDiv.dot(F)
    divF_ana = call2(sol, self.M.gridCC)

    err = np.linalg.norm((divF - divF_ana), np.inf)
    # self.M.plotImage(divF-divF_ana, showIt=True)
    return err
def getError(self):
    # fun: i (cos(y)) + j (cos(z)) + k (cos(x))
    # sol: i (sin(z)) + j (sin(x)) + k (sin(y))
    funX = lambda x, y, z: np.cos(2*np.pi*y)
    funY = lambda x, y, z: np.cos(2*np.pi*z)
    funZ = lambda x, y, z: np.cos(2*np.pi*x)

    solX = lambda x, y, z: 2*np.pi*np.sin(2*np.pi*z)
    solY = lambda x, y, z: 2*np.pi*np.sin(2*np.pi*x)
    solZ = lambda x, y, z: 2*np.pi*np.sin(2*np.pi*y)

    Ec = cartE3(self.M, funX, funY, funZ)
    E = self.M.projectEdgeVector(Ec)

    Fc = cartF3(self.M, solX, solY, solZ)
    curlE_ana = self.M.projectFaceVector(Fc)

    curlE = self.M.edgeCurl.dot(E)

    err = np.linalg.norm((curlE - curlE_ana), np.inf)
    # err = np.linalg.norm((curlE - curlE_ana)*self.M.area, 2)
    return err
def getError(self):
    # Test function
    fun = lambda x, y, z: (np.cos(x) + np.cos(y) + np.cos(z))
    # i (sin(x)) + j (sin(y)) + k (sin(z))
    solX = lambda x, y, z: -np.sin(x)
    solY = lambda x, y, z: -np.sin(y)
    solZ = lambda x, y, z: -np.sin(z)

    phi = call3(fun, self.M.gridN)
    gradE = self.M.nodalGrad.dot(phi)

    Ec = cartE3(self.M, solX, solY, solZ)
    gradE_ana = self.M.projectEdgeVector(Ec)

    err = np.linalg.norm((gradE - gradE_ana), np.inf)
    return err
def getError(self):
    # Test function
    fun = lambda x, y: (np.cos(x) + np.cos(y))
    # i (sin(x)) + j (sin(y))
    solX = lambda x, y: -np.sin(x)
    solY = lambda x, y: -np.sin(y)

    phi = call2(fun, self.M.gridN)
    gradE = self.M.nodalGrad.dot(phi)

    Ec = cartE2(self.M, solX, solY)
    gradE_ana = self.M.projectEdgeVector(Ec)

    err = np.linalg.norm((gradE - gradE_ana), np.inf)
    return err
def getError(self):
    funR = lambda r, z: np.sin(2.*np.pi*r)
    funZ = lambda r, z: np.sin(2.*np.pi*z)
    sol = lambda r, t, z: ((2*np.pi*r*np.cos(2*np.pi*r) + np.sin(2*np.pi*r))/r +
                           2*np.pi*np.cos(2*np.pi*z))

    Fc = cylF2(self.M, funR, funZ)
    Fc = np.c_[Fc[:, 0], np.zeros(self.M.nF), Fc[:, 1]]
    F = self.M.projectFaceVector(Fc)

    divF = self.M.faceDiv.dot(F)
    divF_ana = call3(sol, self.M.gridCC)

    err = np.linalg.norm((divF - divF_ana), np.inf)
    return err
def getError(self):
    funR = lambda r, z: np.sin(2.*np.pi*z) * np.cos(np.pi*r)
    funZ = lambda r, z: np.sin(3.*np.pi*z) * np.cos(2.*np.pi*r)

    Fc = cylF2(self.M, funR, funZ)
    Fc = np.c_[Fc[:, 0], np.zeros(self.M.nF), Fc[:, 1]]
    F = self.M.projectFaceVector(Fc)

    aveF = self.M.aveF2CCV * F

    aveF_anaR = funR(self.M.gridCC[:, 0], self.M.gridCC[:, 2])
    aveF_anaZ = funZ(self.M.gridCC[:, 0], self.M.gridCC[:, 2])
    aveF_ana = np.hstack([aveF_anaR, aveF_anaZ])

    err = np.linalg.norm((aveF - aveF_ana), np.inf)
    return err
def getError(self):
    # Test function
    fx = lambda x: -2*np.pi*np.sin(2*np.pi*x)
    sol = lambda x: np.cos(2*np.pi*x)

    xc = sol(self.M.gridCC)
    gradX_ana = fx(self.M.gridFx)

    bc = np.array([1, 1])
    self.M.setCellGradBC('dirichlet')
    gradX = self.M.cellGrad.dot(xc) + self.M.cellGradBC * bc

    err = np.linalg.norm((gradX - gradX_ana), np.inf)
    return err
def getError(self):
    # Test function
    fx = lambda x, y: 2*np.pi*np.cos(2*np.pi*x)*np.sin(2*np.pi*y)
    fy = lambda x, y: 2*np.pi*np.cos(2*np.pi*y)*np.sin(2*np.pi*x)
    sol = lambda x, y: np.sin(2*np.pi*x)*np.sin(2*np.pi*y)

    xc = call2(sol, self.M.gridCC)

    Fc = cartF2(self.M, fx, fy)
    gradX_ana = self.M.projectFaceVector(Fc)

    self.M.setCellGradBC('dirichlet')
    gradX = self.M.cellGrad.dot(xc)

    err = np.linalg.norm((gradX - gradX_ana), np.inf)
    return err