We extracted the following 50 code examples from open-source Python projects to illustrate how to use numpy.sign().
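Before the project examples, here is a minimal sketch (not taken from any of the projects below) of what numpy.sign() itself returns: -1, 0, or +1 elementwise, with the result's dtype matching the input.

import numpy as np

x = np.array([-3.5, 0.0, 2.0])
print(np.sign(x))                            # [-1.  0.  1.]
print(np.sign(-7), np.sign(0), np.sign(7))   # -1 0 1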
def sample():
    '''
    Draw a sample from the distribution of polar angle of the angular
    momentum vector, :math:`\\theta`, computed using the Monte Carlo
    technique discussed in the paper.

    .. plot::
         :align: center

         from planetplanet.photo import theta
         import matplotlib.pyplot as pl
         x = [theta.sample() for i in range(10000)]
         pl.hist(x, bins = 50)
         pl.xlabel(r'$\\theta$ [deg]', fontweight = 'bold')
         pl.ylabel('Probability', fontweight = 'bold')
         pl.show()

    '''
    y = np.random.random()
    f = lambda x: CDF(x) - y
    while np.sign(f(0)) == np.sign(f(1)):
        y = np.random.random()
        f = lambda x: CDF(x) - y
    return brentq(f, 0, 1)
def calc_loss(self, states, actions, rewards, next_states, episode_ends):
    qv = self.agent.q(states)
    q_t = self.target(next_states)  # Q(s', *)
    max_q_prime = np.array(list(map(np.max, q_t.data)), dtype=np.float32)  # max_a Q(s', a)

    target = cuda.to_cpu(qv.data.copy())
    for i in range(self.replay_size):
        if episode_ends[i][0] is True:
            _r = np.sign(rewards[i])
        else:
            _r = np.sign(rewards[i]) + self.gamma * max_q_prime[i]
        target[i, actions[i]] = _r

    td = Variable(self.target.arr_to_gpu(target)) - qv
    td_tmp = td.data + 1000.0 * (abs(td.data) <= 1)  # Avoid zero division
    td_clip = td * (abs(td.data) <= 1) + td / abs(td_tmp) * (abs(td.data) > 1)

    zeros = Variable(self.target.arr_to_gpu(
        np.zeros((self.replay_size, self.target.n_action), dtype=np.float32)))
    loss = F.mean_squared_error(td_clip, zeros)
    self._loss = loss.data
    self._qv = np.max(qv.data)
    return loss
def __call__(self, x):
    """

    Args:
        x (FloatTensor/LongTensor or ndarray)

    Returns:
        x_mu (LongTensor or ndarray)

    """
    mu = self.qc - 1.
    if isinstance(x, np.ndarray):
        x_mu = np.sign(x) * np.log1p(mu * np.abs(x)) / np.log1p(mu)
        x_mu = ((x_mu + 1) / 2 * mu + 0.5).astype(int)
    elif isinstance(x, (torch.Tensor, torch.LongTensor)):
        if isinstance(x, torch.LongTensor):
            x = x.float()
        mu = torch.FloatTensor([mu])
        x_mu = torch.sign(x) * torch.log1p(mu * torch.abs(x)) / torch.log1p(mu)
        x_mu = ((x_mu + 1) / 2 * mu + 0.5).long()
    return x_mu
def _step(self, action):
    obs, reward, done, info = self.env.step(action)
    obs = self.process_observation(obs)
    if self.squash_rewards:
        reward = float(np.sign(reward))
    else:
        reward = float(reward) / float(self.reward_scale)
    info["frame/lives"] = info["ale.lives"]
    if self.lives is None:
        self.lives = info["ale.lives"]
    else:
        current_lives = info["ale.lives"]
        lost = self.lives - current_lives
        self.lives = current_lives
        if lost > 0:
            reward -= lost * self.death_penalty
    return obs, reward, done, info
def reward(self, a, s1):
    """Immediate Reward Function."""
    reward = 0
    s0, s1 = self.data, s1.data

    # rewards related to states
    if any(proximity < ProximitySensor.COLLISION_THRESHOLD
           for proximity in s0[1:]):
        reward += self.IMMEDIATE_REWARD['collision']

    reward += (np.sign(s0[0] - s1[0]) *
               self.IMMEDIATE_REWARD['position-delta'])

    if s1[0] < s0[0]:
        reward_proximity = (self.IMMEDIATE_REWARD['close-to-goal'] *
                            (1 - self.data[0] / 28))
        reward += reward_proximity
        logger.info('distance: %.2f, reward-proximity: %.2f',
                    s0[0], reward_proximity)

    # rewards related to actions.
    reward += self.IMMEDIATE_REWARD[a]

    logger.info('reward: %.2f', reward)
    return reward
def soft_threshold(X, thresh):
    """Proximal mapping of l1-norm results in soft-thresholding. Therefore,
    it is required for the optimisation of the GFGL or IFGL.

    Parameters
    ----------
    X : ndarray
        input data of arbitrary shape
    thresh : float
        threshold value

    Returns
    -------
    ndarray
        soft threshold applied
    """
    return (np.absolute(X) - thresh).clip(0) * np.sign(X)
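A quick, hypothetical usage check of the soft_threshold above (assuming numpy is imported as np, as in the snippet): entries with |X| <= thresh map to 0, while larger magnitudes shrink toward zero by thresh.

import numpy as np

X = np.array([-3.0, 0.0, 0.5, 3.0])
print(soft_threshold(X, 1.0))   # [-2.  0.  0.  2.]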
def initwithsize(self, curshape, dim):
    # DIM-dependent initialization
    if self.dim != dim:
        if self.zerox:
            self.xopt = zeros(dim)
        else:
            self.xopt = 0.5 * sign(unif(dim, self.rseed) - 0.5) * 4.2096874633
        self.scales = (self.condition ** .5) ** np.linspace(0, 1, dim)

    # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
    if self.lastshape != curshape:
        self.dim = dim
        self.lastshape = curshape
        self.arrxopt = resize(2 * np.abs(self.xopt), curshape)
        self.arrscales = resize(self.scales, curshape)
        self.arrsigns = resize(sign(self.xopt), curshape)
def initwithsize(self, curshape, dim):
    # DIM-dependent initialization
    if self.dim != dim:
        if self.zerox:
            self.xopt = zeros(dim)
        else:
            self.xopt = .5 * self._mu1 * sign(gauss(dim, self.rseed))
        self.rotation = compute_rotation(self.rseed + 1e6, dim)
        self.scales = (self.condition ** .5) ** linspace(0, 1, dim)
        self.linearTF = dot(compute_rotation(self.rseed, dim), diag(self.scales))
        # decouple scaling from function definition
        self.linearTF = dot(self.linearTF, self.rotation)

    # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
    if self.lastshape != curshape:
        self.dim = dim
        self.lastshape = curshape
        # self.arrxopt = resize(self.xopt, curshape)
        self.arrscales = resize(2. * sign(self.xopt), curshape)  # makes up for xopt
def update_measure(self):
    """updated noise level measure using two fitness lists ``self.fit`` and
    ``self.fitre``, return ``self.noiseS, all_individual_measures``.

    Assumes that ``self.idx`` contains the indices where the fitness
    lists differ.

    """
    lam = len(self.fit)
    idx = np.argsort(self.fit + self.fitre)
    ranks = np.argsort(idx).reshape((2, lam))
    rankDelta = ranks[0] - ranks[1] - np.sign(ranks[0] - ranks[1])

    # compute rank change limits using both ranks[0] and ranks[1]
    r = np.arange(1, 2 * lam)  # 2 * lam - 2 elements
    limits = [0.5 * (Mh.prctile(np.abs(r - (ranks[0, i] + 1 - (ranks[0, i] > ranks[1, i]))),
                                self.theta * 50) +
                     Mh.prctile(np.abs(r - (ranks[1, i] + 1 - (ranks[1, i] > ranks[0, i]))),
                                self.theta * 50))
              for i in self.idx]
    # compute measurement
    # max: 1 rankchange in 2*lambda is always fine
    s = np.abs(rankDelta[self.idx]) - Mh.amax(limits, 1)  # lives roughly in 0..2*lambda
    self.noiseS += self.cum * (np.mean(s) - self.noiseS)
    return self.noiseS, s
def compute_pvalues_for_processes(self, U_matrix, chane_prob, num_bootstrapped_stats=100):
    N = U_matrix.shape[0]
    bootsraped_stats = np.zeros(num_bootstrapped_stats)

    # orsetinW = simulate(N,num_bootstrapped_stats,corr)

    for proc in range(num_bootstrapped_stats):
        # W = np.sign(orsetinW[:,proc])
        W = simulatepm(N, chane_prob)
        WW = np.outer(W, W)
        st = np.mean(U_matrix * WW)
        bootsraped_stats[proc] = N * st

    stat = N * np.mean(U_matrix)

    return float(np.sum(bootsraped_stats > stat)) / num_bootstrapped_stats
def get_extrema(data):
    # find extrema by finding indexes where diff changes sign
    data_diff = np.diff(data)
    asign = np.sign(data_diff)
    signchange = ((np.roll(asign, 1) - asign) != 0).astype(int)

    # first and last value is always a local extrema
    signchange[0] = 1

    # last value is missing because the diff-array is 1 value shorter than the
    # input array so we have to add it again
    signchange = np.append(signchange, np.array([1]))

    calc_data = data[np.where(signchange != 0)]

    return calc_data
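A small, hypothetical check of get_extrema (assuming numpy as np): the first value, the interior peak and trough, and the last value are kept.

import numpy as np

data = np.array([1, 2, 3, 2, 1, 2])
print(get_extrema(data))   # [1 3 1 2]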
def compute_mingrad_l1(self, main_rdd, cinfo, K):
    R = cinfo

    def maxmin_l1(tpl1, tpl2):
        (z1, x1, lam1, i1) = tpl1
        (z2, x2, lam2, i2) = tpl2
        zt = max(abs(z1), abs(z2))
        if zt > abs(z2):
            out = (z1, x1, lam1, i1)
        else:
            out = (z2, x2, lam2, i2)
        return out

    def CompMingrad(tpl):
        p = []
        for ((tx, lam), index) in tpl:
            p.append(((np.matrix(tx) * R)[0, 0], tx, lam, index))
        return p

    (mingrad, xmin, lambdaMin, iStar) = main_rdd.flatMapValues(CompMingrad) \
        .map(lambda (key, value): value).reduce(maxmin_l1)

    s_star = -np.sign(mingrad)
    return (mingrad, xmin, lambdaMin, iStar, s_star)
def calc_PrimaryRegion(self, X, Z):
    """Predicts magnitude and direction of primary field in region"""

    # CALCULATES INDUCING FIELD WITHIN REGION AND RETURNS AT LOCATIONS

    # Initiate Variables from object
    I = self.I
    a1 = self.a1
    eps = 1e-6
    mu0 = 4*np.pi*1e-7  # 1e9*mu0

    s = np.abs(X)  # Define Radial Distance

    k = 4*a1*s/(Z**2 + (a1+s)**2)

    Bpx = mu0*np.sign(X)*(Z*I/(2*np.pi*s + eps))*(1/np.sqrt(Z**2 + (a1+s)**2))*(-sp.ellipk(k) + ((a1**2 + Z**2 + s**2)/(Z**2 + (s-a1)**2))*sp.ellipe(k))
    Bpz = mu0*(I/(2*np.pi))*(1/np.sqrt(Z**2 + (a1+s)**2))*(sp.ellipk(k) + ((a1**2 - Z**2 - s**2)/(Z**2 + (s-a1)**2))*sp.ellipe(k))

    Bpx[(X > -1.025*a1) & (X < -0.975*a1) & (Z > -0.025*a1) & (Z < 0.025*a1)] = 0.
    Bpx[(X < 1.025*a1) & (X > 0.975*a1) & (Z > -0.025*a1) & (Z < 0.025*a1)] = 0.
    Bpz[(X > -1.025*a1) & (X < -0.975*a1) & (Z > -0.025*a1) & (Z < 0.025*a1)] = 0.
    Bpz[(X < 1.025*a1) & (X > 0.975*a1) & (Z > -0.025*a1) & (Z < 0.025*a1)] = 0.

    Babs = np.sqrt(Bpx**2 + Bpz**2)

    return Bpx, Bpz, Babs
def make_classifier(estimator, params=None):
    """Make a classifier for a possible regressor.

    .. deprecated:: 0.5

    Parameters
    ----------
    estimator : sklearn-like class
        It must contain at least a fit and predict method.
    params : dict, optional
        Parameters of the classifier.

    Returns
    -------
    generic_classifier : class
        sklearn-like class that is a subclass of estimator. The predict
        method has been overwritten in order to return only the sign of
        the results. Note: this assumes that labels are 1 and -1.
    """
    if params is None:
        params = {}
    params['predict'] = predict
    params.setdefault('score', accuracy_score)
    return type('GenericClassifier', (estimator,), params)()
def inv_mulaw(y, mu=256):
    """Inverse of mu-law companding (mu-law expansion)

    .. math::

        f^{-1}(y) = \mathrm{sign}(y) \frac{1}{\mu} \left( (1 + \mu)^{|y|} - 1 \right)

    Args:
        y (array-like): Compressed signal. Each value of input signal must be in
          range of [-1, 1].
        mu (number): Compression parameter ``μ``.

    Returns:
        array-like: Uncompressed signal (-1 <= x <= 1)

    See also:
        :func:`nnmnkwii.preprocessing.inv_mulaw`
        :func:`nnmnkwii.preprocessing.mulaw_quantize`
        :func:`nnmnkwii.preprocessing.inv_mulaw_quantize`
    """
    return _sign(y) * (1.0 / mu) * ((1.0 + mu)**_abs(y) - 1.0)
def vorEdges(vor, far):
    """
    Given a voronoi tesselation, returns the set of voronoi edges.
    far is the length of the "infinity" edges
    """
    edges = []
    for simplex in vor.ridge_vertices:
        simplex = numpy.asarray(simplex)
        if numpy.all(simplex >= 0):
            edge = {}
            edge['p1'], edge['p2'] = vor.vertices[simplex, 0], vor.vertices[simplex, 1]
            edge['p1'] = numpy.array([vor.vertices[simplex, 0][0], vor.vertices[simplex, 1][0]])
            edge['p2'] = numpy.array([vor.vertices[simplex, 0][1], vor.vertices[simplex, 1][1]])
            edge['t'] = (edge['p2'] - edge['p1']) / numpy.linalg.norm(edge['p2'] - edge['p1'])
            edges.append(edge)

    ptp_bound = vor.points.ptp(axis=0)
    center = vor.points.mean(axis=0)
    for pointidx, simplex in zip(vor.ridge_points, vor.ridge_vertices):
        simplex = numpy.asarray(simplex)
        if numpy.any(simplex < 0):
            i = simplex[simplex >= 0][0]  # finite end Voronoi vertex

            t = vor.points[pointidx[1]] - vor.points[pointidx[0]]  # tangent
            t /= numpy.linalg.norm(t)
            n = numpy.array([-t[1], t[0]])  # normal

            midpoint = vor.points[pointidx].mean(axis=0)
            direction = numpy.sign(numpy.dot(midpoint - center, n)) * n
            far_point = vor.vertices[i] + direction * ptp_bound.max() * far

            edge = {}
            edge['p1'], edge['p2'] = numpy.array([vor.vertices[i, 0], far_point[0]]), numpy.array(
                [vor.vertices[i, 1], far_point[1]])
            edge['p1'], edge['p2'] = vor.vertices[i, :], far_point
            edge['t'] = (edge['p2'] - edge['p1']) / numpy.linalg.norm(edge['p2'] - edge['p1'])
            edges.append(edge)
    return edges
def __call__(self, x_mu):
    """

    Args:
        x_mu (FloatTensor/LongTensor or ndarray)

    Returns:
        x (FloatTensor or ndarray)

    """
    mu = self.qc - 1.
    if isinstance(x_mu, np.ndarray):
        x = ((x_mu) / mu) * 2 - 1.
        x = np.sign(x) * (np.exp(np.abs(x) * np.log1p(mu)) - 1.) / mu
    elif isinstance(x_mu, (torch.Tensor, torch.LongTensor)):
        if isinstance(x_mu, torch.LongTensor):
            x_mu = x_mu.float()
        mu = torch.FloatTensor([mu])
        x = ((x_mu) / mu) * 2 - 1.
        x = torch.sign(x) * (torch.exp(torch.abs(x) * torch.log1p(mu)) - 1.) / mu
    return x
def write_load_file(loadfilename, load_nodeID_amp, direction=-3,
                    header_comment="$Generated by GaussExc.py\n"):
    """write load file

    :param loadfilename:
    :param load_nodeID_amp: list of int node ID, float amp
    :param direction: default = -3 (orientation (1, 2, 3) and sign)
    :param header_comment:
    """
    from numpy import sign, abs

    d = abs(direction)
    dsign = sign(direction)

    lfile = open(loadfilename, 'w')
    lfile.write(header_comment)
    lfile.write("*LOAD_NODE_POINT\n")
    [lfile.write("%i,%i,1,%.4f\n" % (i, d, dsign * j)) for i, j in load_nodeID_amp]
    lfile.write("*END\n")
    lfile.close()

    return 0
def get_output_p(self, path):
    # this gives the p_dist for every step: the latent posterior wrt obs_act
    if self.recurrent:
        obs_actions = [np.concatenate([path["observations"][:, self.obs_regressed],
                                       path["actions"][:, self.act_regressed]],
                                      axis=1)]  # is this the same??
    else:
        obs_actions = np.concatenate([path["observations"][:, self.obs_regressed],
                                      path["actions"][:, self.act_regressed]], axis=1)
    if self.noisify_traj_coef:
        obs_actions += np.random.normal(loc=0.0,
                                        scale=float(np.mean(np.abs(obs_actions))) * self.noisify_traj_coef,
                                        size=np.shape(obs_actions))
    if self.use_only_sign:
        obs_actions = np.sign(obs_actions)
    if self.policy.latent_name == 'bernoulli':
        return self._regressor._f_p(obs_actions).flatten()
    elif self.policy.latent_name == 'normal':
        return self._regressor._f_pdists(obs_actions).flatten()
def predict_log_likelihood(self, paths, latents):
    if self.recurrent:
        observations = np.array([p["observations"][:, self.obs_regressed] for p in paths])
        actions = np.array([p["actions"][:, self.act_regressed] for p in paths])
        obs_actions = np.concatenate([observations, actions], axis=2)  # latents must match first 2dim: (batch,time)
    else:
        observations = np.concatenate([p["observations"][:, self.obs_regressed] for p in paths])
        actions = np.concatenate([p["actions"][:, self.act_regressed] for p in paths])
        obs_actions = np.concatenate([observations, actions], axis=1)
        latents = np.concatenate(latents, axis=0)
    if self.noisify_traj_coef:
        noise = np.random.multivariate_normal(mean=np.zeros_like(np.mean(obs_actions, axis=0)),
                                              cov=np.diag(np.mean(np.abs(obs_actions), axis=0) *
                                                          self.noisify_traj_coef),
                                              size=np.shape(obs_actions)[0])
        obs_actions += noise
    if self.use_only_sign:
        obs_actions = np.sign(obs_actions)
    return self._regressor.predict_log_likelihood(obs_actions, latents)  # see difference with fit above...
def lowb_mutual(self, paths, times=(0, None)):
    if self.recurrent:
        observations = np.array([p["observations"][times[0]:times[1], self.obs_regressed] for p in paths])
        actions = np.array([p["actions"][times[0]:times[1], self.act_regressed] for p in paths])
        obs_actions = np.concatenate([observations, actions], axis=2)
        latents = np.array([p['agent_infos']['latents'][times[0]:times[1]] for p in paths])
    else:
        observations = np.concatenate([p["observations"][times[0]:times[1], self.obs_regressed] for p in paths])
        actions = np.concatenate([p["actions"][times[0]:times[1], self.act_regressed] for p in paths])
        obs_actions = np.concatenate([observations, actions], axis=1)
        latents = np.concatenate([p['agent_infos']["latents"][times[0]:times[1]] for p in paths])
    if self.noisify_traj_coef:
        obs_actions += np.random.multivariate_normal(mean=np.zeros_like(np.mean(obs_actions, axis=0)),
                                                     cov=np.diag(np.mean(np.abs(obs_actions), axis=0) *
                                                                 self.noisify_traj_coef),
                                                     size=np.shape(obs_actions)[0])
    if self.use_only_sign:
        obs_actions = np.sign(obs_actions)
    H_latent = self.policy.latent_dist.entropy(self.policy.latent_dist_info)  # sum of entropies latents in
    return H_latent + np.mean(self._regressor.predict_log_likelihood(obs_actions, latents))
def find_fermi_SPB(cbm_vbm, c, T, tolerance=0.001, tolerance_loose=0.03,
                   alpha=0.02, max_iter=1000):
    tp = get_tp(c)
    sgn = np.sign(c)
    m_eff = np.prod(cbm_vbm[tp]["eff_mass_xx"]) ** (1.0 / 3.0)
    c *= sgn
    initial_energy = cbm_vbm[tp]["energy"]
    fermi = initial_energy + 0.02
    iter = 0
    for iter in range(max_iter):
        calc_doping = 4 * pi * (2 * m_eff * m_e * k_B * T / hbar ** 2) ** 1.5 * \
            fermi_integral(0.5, fermi, T, initial_energy) * 1e-6 / e ** 1.5
        fermi += alpha * sgn * (calc_doping - c) / abs(c + calc_doping) * fermi
        relative_error = abs(calc_doping - c) / abs(c)
        if relative_error <= tolerance:
            # This here assumes that the SPB generator set the VBM to 0.0 and CBM = gap + scissor
            if sgn < 0:
                return fermi
            else:
                return -(fermi - initial_energy)
    if relative_error > tolerance:
        raise ValueError("could NOT find a corresponding SPB fermi level after {} iterations".format(max_iter))
def hex2vec(h, ell):
    """hex2vec(h, ell) generates sign vector of length ell from the hex string h.
    ell must be <= 4*len(h) (excluding the optional leading "0x")
    """
    if h[0:2] in ['0x', '0X']:
        h = h[2:]
    nybble = numpy.array([
        [0, 0, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0], [0, 0, 1, 1],
        [0, 1, 0, 0], [0, 1, 0, 1], [0, 1, 1, 0], [0, 1, 1, 1],
        [1, 0, 0, 0], [1, 0, 0, 1], [1, 0, 1, 0], [1, 0, 1, 1],
        [1, 1, 0, 0], [1, 1, 0, 1], [1, 1, 1, 0], [1, 1, 1, 1]])
    vec = numpy.ravel(numpy.array([nybble[int(x, 16)] for x in h]))
    if len(vec) < ell:
        raise ValueError('hex string too short')
    return vec[len(vec) - ell:]
def native_pla(x_d, y_d, is_rand=False, repeat=1, eta=1.0):
    total_update = 0
    for rpt in range(0, repeat):
        w = np.zeros(len(x_d[0]))
        update_count = 0
        all_pass = False
        index = [i for i in range(len(x_d))]
        if is_rand:
            random.shuffle(index)
        while not all_pass:
            all_pass = True
            for t in index:
                if np.sign(np.inner(x_d[t], w)) != y_d[t]:
                    w += eta * y_d[t] * x_d[t]
                    all_pass = False
                    update_count += 1
        total_update += update_count
    return w, total_update / repeat
def update(self, es, **kwargs):
    if es.countiter < 2:
        self.initialize(es)
        self.fit = es.fit.fit
    else:
        ft1, ft2 = self.fit[int(self.index_to_compare)], self.fit[int(np.ceil(self.index_to_compare))]
        ftt1, ftt2 = es.fit.fit[(es.popsize - 1) // 2], es.fit.fit[int(np.ceil((es.popsize - 1) / 2))]
        pt2 = self.index_to_compare - int(self.index_to_compare)
        # ptt2 = (es.popsize - 1) / 2 - (es.popsize - 1) // 2  # not in use
        s = 0
        if 1 < 3:
            s += pt2 * sum(es.fit.fit <= self.fit[int(np.ceil(self.index_to_compare))])
            s += (1 - pt2) * sum(es.fit.fit < self.fit[int(self.index_to_compare)])
            s -= es.popsize / 2.
            s *= 2. / es.popsize  # the range was popsize, is 2
        self.s = (1 - self.c) * self.s + self.c * s
        es.sigma *= exp(self.s / self.damp)
        # es.more_to_write.append(10**(self.s))
        # es.more_to_write.append(10**((2 / es.popsize) * (sum(es.fit.fit < self.fit[int(self.index_to_compare)]) - (es.popsize + 1) / 2)))
        # es.more_to_write.append(10**(self.index_to_compare - sum(self.fit <= es.fit.fit[es.popsize // 2])))
        # es.more_to_write.append(10**(np.sign(self.fit[int(self.index_to_compare)] - es.fit.fit[es.popsize // 2])))
        self.fit = es.fit.fit
def update(self, es, function_values, **kwargs):
    """the first and second value in ``function_values``
    must reflect two mirrored solutions sampled
    in direction / in opposite direction of
    the previous mean shift, respectively.

    """
    # TODO: on the linear function, the two mirrored samples lead
    # to a sharp increase of condition of the covariance matrix.
    # They should not be used to update the covariance matrix,
    # if the step-size increases quickly. This should be fine with
    # negative updates though.
    if not self.initialized:
        self.initialize(es.N, es.opts)
    if 1 < 3:
        # use the ranking difference of the mirrors for adaptation
        # damp = 5 should be fine
        z = np.where(es.fit.idx == 1)[0][0] - np.where(es.fit.idx == 0)[0][0]
        z /= es.popsize - 1  # z in [-1, 1]
    self.s = (1 - self.sp.c) * self.s + self.sp.c * np.sign(z) * np.abs(z)**self.sp.z_exponent
    if self.s > 0:
        es.sigma *= exp(self.s / self.sp.dampup)
    else:
        es.sigma *= exp(self.s / self.sp.dampdown)
    # es.more_to_write.append(10**z)
def update_measure(self):
    """updated noise level measure using two fitness lists ``self.fit`` and
    ``self.fitre``, return ``self.noiseS, all_individual_measures``.

    Assumes that `self.idx` contains the indices where the fitness
    lists differ

    """
    lam = len(self.fit)
    idx = np.argsort(self.fit + self.fitre)
    ranks = np.argsort(idx).reshape((2, lam))
    rankDelta = ranks[0] - ranks[1] - np.sign(ranks[0] - ranks[1])

    # compute rank change limits using both ranks[0] and ranks[1]
    r = np.arange(1, 2 * lam)  # 2 * lam - 2 elements
    limits = [0.5 * (Mh.prctile(np.abs(r - (ranks[0, i] + 1 - (ranks[0, i] > ranks[1, i]))),
                                self.theta * 50) +
                     Mh.prctile(np.abs(r - (ranks[1, i] + 1 - (ranks[1, i] > ranks[0, i]))),
                                self.theta * 50))
              for i in self.idx]
    # compute measurement
    # max: 1 rankchange in 2*lambda is always fine
    s = np.abs(rankDelta[self.idx]) - Mh.amax(limits, 1)  # lives roughly in 0..2*lambda
    self.noiseS += self.cum * (np.mean(s) - self.noiseS)
    return self.noiseS, s
def update_market_value(self, price):
    """Compute the current market value of the position. This is the current
    price multiplied by the direction of the trade (represented by the sign
    of the net number of shares bought and sold). The function also updates
    the unrealized and realized profits and losses.
    """
    # Compute the mean of the bid and ask price to compute the assumed value
    # of the position.
    #
    # N.B. The market value is akin to the amount of cash that would
    # be injected into the portfolio if the position were liquidated. This
    # means that if a position is short, then a negative amount will be
    # injected (i.e. paid out). On the other hand, the current value is the
    # profit-and-loss on a position relative to the cost basis.
    self.market_value = self.net * price
    self.unrealized_pnl = self.market_value - self.cost_basis
    self.realized_pnl = self.market_value + self.net_tot_incl_comm
def iterate(self, x, eps=32, alp=1.0):
    num_iter = min(eps + 4, 1.25 * eps)
    loss = 1.0
    x = np.copy(x)
    while loss > 0 and num_iter > 0:
        inp = x.reshape((1,) + inp_size)
        outs = self.f_outputs([inp, 0])
        loss = outs[0]
        print('Loss: ', loss)
        grads = np.array(outs[1:]).reshape(inp_size)
        s_grads = np.sign(grads)
        adv_x = x - alp * s_grads
        sub_x = np.minimum(x + eps, np.maximum(x - eps, adv_x))
        next_x = preprocess_img(np.clip(deprocess_img(sub_x), 0.0, 255.0))
        x = next_x
        confidence = self.mdl.predict(x.reshape((1,) + inp_size))[0][0]
        print('Current confidence value: ', confidence)  # 'minval =', min_val)
        yield (deprocess_img(x), confidence)
        num_iter -= 1
def mu_law(x, mu=255, int8=False):
    """A TF implementation of Mu-Law encoding.

    Args:
        x: The audio samples to encode.
        mu: The Mu to use in our Mu-Law.
        int8: Use int8 encoding.

    Returns:
        out: The Mu-Law encoded int8 data.
    """
    out = tf.sign(x) * tf.log(1 + mu * tf.abs(x)) / np.log(1 + mu)
    out = tf.floor(out * 128)
    if int8:
        out = tf.cast(out, tf.int8)
    return out
def validate_and_normalize(self, obj, value):
    rounded_value = FloatRegister.validate_and_normalize(self, obj, value)
    if rounded_value == 0 and value != 0:  # value was rounded off to zero
        if self.avoid_round_off_to_zero:
            rounded_value = FloatRegister.validate_and_normalize(
                self, obj, np.abs(self.increment) * np.sign(value))
            obj._logger.warning("Avoided rounding value %.1e of the "
                                "gain register %s to zero. Setting it to %.1e "
                                "instead. ", value, self.name, rounded_value)
        else:
            obj._logger.warning("Rounding value %.1e of the "
                                "gain register %s to zero. ", value, self.name)
    if value > self.max or value < self.min:
        obj._logger.warning("Requested gain for %s.%s is outside the "
                            "bounds allowed by the hardware. Desired "
                            "gain of %.1e is capped to %.1e. ",
                            obj.name, self.name, value, rounded_value)
    return rounded_value
def expect_margin(predictions: np.ndarray, answer: np.ndarray):
    predict_sign = np.sign(predictions)
    answer_sign = np.sign(answer)
    margin_array = []
    for m in range(answer.shape[0]):
        row = []
        for n in range(answer.shape[1]):
            a = answer[m, n]
            p = predictions[m, n]
            p_s = predict_sign[m, n]
            a_s = answer_sign[m, n]
            if p_s == a_s:
                row.append(min(abs(a), abs(p)))
            # elif p_s != a_s or p == 0:
            #     row.append(-1*(abs(a)+abs(p)))
            else:
                # row.append(-1*abs(a))
                row.append(-1 * (abs(a) + abs(p)))
        margin_array.append(row)
    margin_array = np.array(margin_array)
    return np.sum(margin_array, 0)
def predict(self, data):
    '''Perform classification on samples in data.

    :param data: array-like, shape = [n_samples, n_features]
    :return target: array-like, shape = [n_samples]
        Class labels for samples in data.
    '''
    def y_prediction(z):
        support_vectors_sum = sum([alpha * y *
                                   sum([beta * K.compute(z, x)
                                        for beta, K in zip(self.beta, self.kernel_set)])
                                   for alpha, x, y in zip(self.alpha, self.__Xfit, self.__Yfit)])
        p = support_vectors_sum + self.b
        if p == 0.0:
            p = 1.0
        return self.class_dict[str(numpy.sign(p))]

    if not self.fited:
        raise Exception("Fit classificator before.")
    return [y_prediction(test_x) for test_x in data]
def quatFromRotMatx(R):
    """Get a quaternion from a given rotation matrix `R`."""
    q = np.zeros(4)

    q[0] = ( R[0,0] + R[1,1] + R[2,2] + 1) / 4.0
    q[1] = ( R[0,0] - R[1,1] - R[2,2] + 1) / 4.0
    q[2] = (-R[0,0] + R[1,1] - R[2,2] + 1) / 4.0
    q[3] = (-R[0,0] - R[1,1] + R[2,2] + 1) / 4.0

    q[q < 0] = 0  # Avoid complex number by numerical error.
    q = np.sqrt(q)

    q[1] *= np.sign(R[2,1] - R[1,2])
    q[2] *= np.sign(R[0,2] - R[2,0])
    q[3] *= np.sign(R[1,0] - R[0,1])

    return q
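A minimal sanity check of quatFromRotMatx (my addition, not from the source project): the identity rotation should map to the identity quaternion.

import numpy as np

R = np.eye(3)
print(quatFromRotMatx(R))   # [1. 0. 0. 0.]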
def leftOrRight(p, l1, l2):
    return np.sign((l2[0] - l1[0]) * (p[1] - l1[1]) - (l2[1] - l1[1]) * (p[0] - l1[0]))
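A quick illustration (my example; it assumes points are (x, y) pairs, as the indexing suggests): +1 means p lies to the left of the directed line l1 -> l2, -1 to the right, and 0 means collinear.

import numpy as np

l1, l2 = (0.0, 0.0), (1.0, 0.0)          # directed line along +x
print(leftOrRight((0.5, 1.0), l1, l2))   # 1.0  (left)
print(leftOrRight((0.5, -1.0), l1, l2))  # -1.0 (right)
print(leftOrRight((2.0, 0.0), l1, l2))   # 0.0  (collinear)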
def soft_thresh(r, w):
    # elementwise soft-thresholding: sign(w) * max(|w| - r, 0)
    # (np.maximum is used here; the original snippet called np.max, whose
    # second argument is an axis rather than a comparison value)
    return np.sign(w) * np.maximum(np.abs(w) - r, 0)
def _reward(self, reward):
    """Change all the positive rewards to 1, negative to -1 and keep zero."""
    return np.sign(reward)
def derivative(self, X, Y):
    return (np.sign(Y) != np.sign(X))
def normalize_hist(hist, norm_method='global-l2'):
    """
    Various normalization methods

    Refer to:
    [1] Improving the Fisher Kernel for Large-Scale Image Classification, Perronnin et al
        http://www.robots.ox.ac.uk/~vgg/rg/papers/peronnin_etal_ECCV10.pdf
    [2] Segmentation Driven Object Detection with Fisher Vectors, Cinbis et al
    """

    # Component-wise mass normalization
    if norm_method == 'component-wise-mass':
        raise NotImplementedError('Component-wise-mass normalization_method not implemented')

    # Component-wise L2 normalization
    elif norm_method == 'component-wise-l2':
        return hist / np.max(np.linalg.norm(hist, axis=1), 1e-12)

    # Global L2 normalization
    elif norm_method == 'global-l2':
        return hist / (np.linalg.norm(hist) + 1e-12)

    # Square rooting / Power Normalization with alpha = 0.5
    elif norm_method == 'square-rooting':
        # Power-normalization followed by L2 normalization as in [2]
        hist = np.sign(hist) * np.sqrt(np.fabs(hist))
        return hist / (np.linalg.norm(hist) + 1e-12)

    else:
        raise NotImplementedError('Unknown normalization_method %s' % norm_method)
def demo_plot_stdp(kp=0.1, kd=2):
    t = np.arange(-100, 101)
    r = kd / float(kp + kd)
    kbb = r**(np.abs(t))
    k_classic = kbb * np.sign(t)

    plt.figure(figsize=(6, 2))
    with hstack_plots(spacing=0.1, bottom=0.1, left=0.05, right=0.98,
                      xlabel='$t_{post}-t_{pre}$', ylabel='$\Delta w$',
                      sharex=False, sharey=False, show_x=False,
                      remove_ticks=False, grid=True):
        ax = add_subplot()
        plt.plot(t, -kbb)
        plt.title('$sign(\\bar x_t)=sign(\\bar e_t)$')
        plt.xlabel('$t_{post}-t_{pre}$')
        add_subplot()
        plt.plot(t, kbb)
        plt.title('$sign(\\bar x_t)\\neq sign(\\bar e_t)$')
        add_subplot()
        plt.title('Classic STDP Rule')
        plt.plot(t, k_classic)
        ax.tick_params(axis='y', labelleft='off')
        ax.tick_params(axis='x', labelbottom='off')

    plt.show()
def initialize(self, es):
    """late initialization using attributes ``N`` and ``popsize``"""
    r = es.sp.weights.mueff / es.popsize
    self.index_to_compare = 0.5 * (r**0.5 + 2.0 * (1 - r**0.5) / np.log(es.N + 9)**2) * (es.popsize)  # TODO
    self.index_to_compare = 0.30 * es.popsize  # TODO
    self.damp = 2 - 2 / es.N  # sign-rule: 2
    self.c = 0.3  # sign-rule needs <= 0.3
    self.s = 0  # averaged statistics, usually between -1 and +1
def update(self, es, **kwargs):
    if es.countiter < 2:
        self.initialize(es)
        self.fit = es.fit.fit
    else:
        ft1, ft2 = self.fit[int(self.index_to_compare)], self.fit[int(np.ceil(self.index_to_compare))]
        ftt1, ftt2 = es.fit.fit[(es.popsize - 1) // 2], es.fit.fit[int(np.ceil((es.popsize - 1) / 2))]
        pt2 = self.index_to_compare - int(self.index_to_compare)
        # ptt2 = (es.popsize - 1) / 2 - (es.popsize - 1) // 2  # not in use
        s = 0
        if 1 < 3:
            s += pt2 * sum(es.fit.fit <= self.fit[int(np.ceil(self.index_to_compare))])
            s += (1 - pt2) * sum(es.fit.fit < self.fit[int(self.index_to_compare)])
            s -= es.popsize / 2.
            s *= 2. / es.popsize  # the range was popsize, is 2
        elif 11 < 3:  # compare ft with median of ftt
            s += self.index_to_compare - sum(self.fit <= es.fit.fit[es.popsize // 2])
            s *= 2 / es.popsize  # the range was popsize, is 2
        else:  # compare ftt j-index of ft
            s += (1 - pt2) * np.sign(ft1 - ftt1)
            s += pt2 * np.sign(ft2 - ftt1)
        self.s = (1 - self.c) * self.s + self.c * s
        es.sigma *= np.exp(self.s / self.damp)
        # es.more_to_write.append(10**(self.s))
        # es.more_to_write.append(10**((2 / es.popsize) * (sum(es.fit.fit < self.fit[int(self.index_to_compare)]) - (es.popsize + 1) / 2)))
        # es.more_to_write.append(10**(self.index_to_compare - sum(self.fit <= es.fit.fit[es.popsize // 2])))
        # es.more_to_write.append(10**(np.sign(self.fit[int(self.index_to_compare)] - es.fit.fit[es.popsize // 2])))
        if 11 < 3:
            import scipy.stats.stats as stats
            zkendall = stats.kendalltau(list(es.fit.fit) + list(self.fit),
                                        len(es.fit.fit) * [0] + len(self.fit) * [1])[0]
            es.more_to_write.append(10**zkendall)
        self.fit = es.fit.fit
def update(self, es, function_values, **kwargs):
    """the first and second value in ``function_values``
    must reflect two mirrored solutions.

    Mirrored solutions must have been sampled
    in direction / in opposite direction of
    the previous mean shift, respectively.
    """
    # On the linear function, the two mirrored samples lead
    # to a sharp increase of the condition of the covariance matrix,
    # unless we have negative weights (which we have now by default).
    # Otherwise they should not be used to update the covariance
    # matrix, if the step-size increases quickly.
    if self.initialized is not True:  # try again
        self.initialize(es.N, es.opts)
    if self.initialized is not True:
        utils.print_warning("dimension not known, damping set to 4",
                            'update', 'CMAAdaptSigmaTPA')
        self.initialized = True
    if 1 < 3:
        f_vals = function_values
        z = sum(f_vals < f_vals[1]) - sum(f_vals < f_vals[0])
        z /= len(f_vals) - 1  # z in [-1, 1]
    elif 1 < 3:
        # use the ranking difference of the mirrors for adaptation
        # damp = 5 should be fine
        z = np.nonzero(es.fit.idx == 1)[0][0] - np.nonzero(es.fit.idx == 0)[0][0]
        z /= es.popsize - 1  # z in [-1, 1]
    self.s = (1 - self.sp.c) * self.s + self.sp.c * np.sign(z) * np.abs(z)**self.sp.z_exponent
    if self.s > 0:
        es.sigma *= np.exp(self.s / self.sp.dampup)
    else:
        es.sigma *= np.exp(self.s / self.sp.dampdown)
    # es.more_to_write.append(10**z)
def defaultboundaryhandling(x, fac):
    """Returns a float penalty for being outside of boundaries [-5, 5]"""
    xoutside = np.maximum(0., np.abs(x) - 5) * sign(x)
    fpen = fac * np.sum(xoutside**2, -1)  # penalty
    return fpen
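A numeric check with made-up values (not from the benchmark code; assumes numpy as np and sign = np.sign, as the module the snippet comes from does): only the amount outside [-5, 5] is penalized, quadratically.

import numpy as np
sign = np.sign

x = np.array([6.0, -7.0, 3.0])
print(defaultboundaryhandling(x, fac=1.0))   # 5.0, i.e. 1**2 + 2**2 + 0**2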
def initwithsize(self, curshape, dim):
    # DIM-dependent initialization
    if self.dim != dim:
        if self.zerox:
            self.xopt = zeros(dim)  # TODO: what happens here?
        else:
            self.xopt = 5 * sign(compute_xopt(self.rseed, dim))
        self.scales = -sign(self.xopt) * (self.alpha ** .5) ** linspace(0, 1, dim)

    # DIM- and POPSI-dependent initialisations of DIM*POPSI matrices
    if self.lastshape != curshape:
        self.dim = dim
        self.lastshape = curshape
        self.arrxopt = resize(self.xopt, curshape)
def _evalfull(self, x):
    fadd = self.fopt
    curshape, dim = self.shape_(x)
    # it is assumed x are row vectors

    if self.lastshape != curshape:
        self.initwithsize(curshape, dim)

    fadd = fadd + 5 * np.sum(np.abs(self.scales))

    # BOUNDARY HANDLING
    # move "too" good coordinates back into domain
    x = np.array(x)  # convert x and make a copy of x.
    # The following may modify x directly.
    idx_out_of_bounds = (x * self.arrxopt) > 25  # 25 == 5 * 5
    x[idx_out_of_bounds] = sign(x[idx_out_of_bounds]) * 5

    # TRANSFORMATION IN SEARCH SPACE

    # COMPUTATION core
    ftrue = dot(x, self.scales)
    fval = self.noise(ftrue)

    # FINALIZE
    ftrue += fadd
    fval += fadd
    return fval, ftrue
def _evalfull(self, x):
    fadd = self.fopt
    curshape, dim = self.shape_(x)
    # it is assumed x are row vectors

    if self.lastshape != curshape:
        self.initwithsize(curshape, dim)

    # BOUNDARY HANDLING
    xoutside = np.maximum(0, np.abs(x) - 5.) * sign(x)
    fpen = (10. / dim) * np.sum(xoutside ** 2, -1)
    fadd = fadd + fpen

    # TRANSFORMATION IN SEARCH SPACE
    x = x - self.arrxopt  # cannot be replaced with x -= arrxopt!
    x = dot(x, self.rotation)
    x = monotoneTFosc(x)
    x = dot(x, self.linearTF)

    # COMPUTATION core
    if len(curshape) < 2:  # popsize is one
        ftrue = np.sum(dot(self.aK, np.cos(dot(self.bK.T, 2 * np.pi * (np.reshape(x, (1, len(x))) + 0.5)))))
    else:
        ftrue = np.zeros(curshape[0])  # curshape[0] is popsize
        for k, i in enumerate(x):
            # TODO: simplify next line
            ftrue[k] = np.sum(dot(self.aK, np.cos(dot(self.bK.T, 2 * np.pi * (np.reshape(i, (1, len(i))) + 0.5)))))
    ftrue = 10. * (ftrue / dim - self.f0) ** 3
    try:
        ftrue = np.hstack(ftrue)
    except TypeError:
        pass

    fval = self.noise(ftrue)

    # FINALIZE
    ftrue += fadd
    fval += fadd
    return fval, ftrue
def _evalfull(self, x):
    fadd = self.fopt
    curshape, dim = self.shape_(x)
    # it is assumed x are row vectors

    if self.lastshape != curshape:
        self.initwithsize(curshape, dim)

    # TRANSFORMATION IN SEARCH SPACE
    x = 2 * self.arrsigns * x  # makes the below boundary handling effective for coordinates
    try:
        x[:, 1:] = x[:, 1:] + .25 * (x[:, :-1] - self.arrxopt[:, :-1])
    except IndexError:
        x[1:] = x[1:] + .25 * (x[:-1] - self.arrxopt[:-1])
    x = 100. * (self.arrscales * (x - self.arrxopt) + self.arrxopt)

    # BOUNDARY HANDLING
    xoutside = np.maximum(0., np.abs(x) - 500.) * sign(x)  # in [-500, 500]
    fpen = 0.01 * np.sum(xoutside ** 2, -1)
    fadd = fadd + fpen

    # COMPUTATION core
    ftrue = 0.01 * ((418.9828872724339) - np.mean(x * np.sin(np.sqrt(np.abs(x))), -1))
    fval = self.noise(ftrue)

    # FINALIZE
    ftrue += fadd
    fval += fadd
    return fval, ftrue
def _evalfull(self, x):
    fadd = self.fopt
    curshape, dim = self.shape_(x)
    # it is assumed x are row vectors

    if self.lastshape != curshape:
        self.initwithsize(curshape, dim)

    # BOUNDARY HANDLING
    xoutside = np.maximum(0, np.abs(x) - 5.) * sign(x)
    fpen = 1e4 * np.sum(xoutside ** 2, -1)
    fadd = fadd + fpen

    # TRANSFORMATION IN SEARCH SPACE
    x = self.arrscales * x

    # COMPUTATION core
    s = 1 - .5 / ((dim + 20)**0.5 - 4.1)  # tested up to DIM = 160 p in [0.25,0.33]
    d = 1  # shift [1,3], smaller is more difficult
    mu2 = -((self._mu1 ** 2 - d) / s) ** .5
    ftrue = np.minimum(np.sum((x - self._mu1) ** 2, -1),
                       d * dim + s * np.sum((x - mu2) ** 2, -1))
    ftrue = ftrue + 10 * (dim - np.sum(np.cos(2 * np.pi * dot(x - self._mu1, self.linearTF)), -1))
    fval = self.noise(ftrue)

    # FINALIZE
    ftrue += fadd
    fval += fadd
    return fval, ftrue

# dictbbob = {'sphere': F1, 'ellipsoid': F2, 'Rastrigin': F3}