The following 50 code examples, extracted from open-source Python projects, illustrate how to use math.sqrt().
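For reference, math.sqrt() takes a non-negative number and returns its square root as a float; a negative argument raises ValueError (use cmath.sqrt for complex results). A minimal demonstration:

import math

print(math.sqrt(16))   # 4.0
print(math.sqrt(2))    # 1.4142135623730951
# math.sqrt(-1) raises ValueError: math domain error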
def moveOrigin(self, newOrigin, dist=1):
    # full distance between the current origin and the target
    distance = math.sqrt((self.origin.x - newOrigin.x)**2 + (self.origin.y - newOrigin.y)**2)
    xDist = self.origin.x - newOrigin.x  # separation along x
    yDist = self.origin.y - newOrigin.y  # separation along y
    ratio = dist / distance
    xMove = abs(xDist) * ratio
    yMove = abs(yDist) * ratio
    if xDist > 0:
        newX = self.origin.x - xMove
    else:
        newX = self.origin.x + xMove
    if yDist > 0:
        newY = self.origin.y - yMove
    else:
        newY = self.origin.y + yMove
    return (newX, newY)
def xavier_uniform(tensor, gain=1):
    """Fills the input Tensor or Variable with values according to the method
    described in "Understanding the difficulty of training deep feedforward
    neural networks" - Glorot, X. & Bengio, Y. (2010), using a uniform
    distribution. The resulting tensor will have values sampled from
    :math:`U(-a, a)` where
    :math:`a = gain \times \sqrt{2 / (fan\_in + fan\_out)} \times \sqrt{3}`.
    Also known as Glorot initialisation.

    Args:
        tensor: an n-dimensional torch.Tensor or autograd.Variable
        gain: an optional scaling factor

    Examples:
        >>> w = torch.Tensor(3, 5)
        >>> nn.init.xavier_uniform(w, gain=nn.init.calculate_gain('relu'))
    """
    if isinstance(tensor, Variable):
        xavier_uniform(tensor.data, gain=gain)
        return tensor

    fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
    std = gain * math.sqrt(2.0 / (fan_in + fan_out))
    a = math.sqrt(3.0) * std  # Calculate uniform bounds from standard deviation
    return tensor.uniform_(-a, a)
def xavier_normal(tensor, gain=1):
    """Fills the input Tensor or Variable with values according to the method
    described in "Understanding the difficulty of training deep feedforward
    neural networks" - Glorot, X. & Bengio, Y. (2010), using a normal
    distribution. The resulting tensor will have values sampled from
    :math:`N(0, std)` where
    :math:`std = gain \times \sqrt{2 / (fan\_in + fan\_out)}`.
    Also known as Glorot initialisation.

    Args:
        tensor: an n-dimensional torch.Tensor or autograd.Variable
        gain: an optional scaling factor

    Examples:
        >>> w = torch.Tensor(3, 5)
        >>> nn.init.xavier_normal(w)
    """
    if isinstance(tensor, Variable):
        xavier_normal(tensor.data, gain=gain)
        return tensor

    fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
    std = gain * math.sqrt(2.0 / (fan_in + fan_out))
    return tensor.normal_(0, std)
def _test_generator(n, func, args):
    import time
    print(n, 'times', func.__name__)
    total = 0.0
    sqsum = 0.0
    smallest = 1e10
    largest = -1e10
    t0 = time.time()
    for i in range(n):
        x = func(*args)
        total += x
        sqsum = sqsum + x*x
        smallest = min(x, smallest)
        largest = max(x, largest)
    t1 = time.time()
    print(round(t1-t0, 3), 'sec,', end=' ')
    avg = total/n
    stddev = _sqrt(sqsum/n - avg*avg)
    print('avg %g, stddev %g, min %g, max %g\n' %
          (avg, stddev, smallest, largest))
def unit_cell_volume(self):
    """ Calculates unit cell volume of a given MOF object. """
    a = self.uc_size[0]
    b = self.uc_size[1]
    c = self.uc_size[2]
    alp = math.radians(self.uc_angle[0])
    bet = math.radians(self.uc_angle[1])
    gam = math.radians(self.uc_angle[2])

    volume = 1 - math.cos(alp)**2 - math.cos(bet)**2 - math.cos(gam)**2
    volume += 2 * math.cos(alp) * math.cos(bet) * math.cos(gam)
    volume = a * b * c * math.sqrt(volume)
    frac_volume = volume / (a * b * c)

    self.ucv = volume
    self.frac_ucv = frac_volume
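The method above implements the standard triclinic unit-cell volume formula V = a*b*c*sqrt(1 - cos²α - cos²β - cos²γ + 2*cosα*cosβ*cosγ). A standalone sketch of the same computation (triclinic_volume is a hypothetical helper, not part of the MOF class), checked against the cubic special case where all angles are 90° and the square-root factor reduces to 1:

import math

def triclinic_volume(a, b, c, alpha, beta, gamma):
    # angles in degrees; for alpha = beta = gamma = 90 the factor is 1, so V = a*b*c
    ca, cb, cg = (math.cos(math.radians(x)) for x in (alpha, beta, gamma))
    return a * b * c * math.sqrt(1 - ca**2 - cb**2 - cg**2 + 2 * ca * cb * cg)

print(triclinic_volume(10, 10, 10, 90, 90, 90))  # ~1000.0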
def dist_to_opt(self):
    global_state = self._global_state
    beta = self._beta
    if self._iter == 0:
        global_state["grad_norm_avg"] = 0.0
        global_state["dist_to_opt_avg"] = 0.0
    global_state["grad_norm_avg"] = \
        global_state["grad_norm_avg"] * beta \
        + (1 - beta) * math.sqrt(global_state["grad_norm_squared"])
    global_state["dist_to_opt_avg"] = \
        global_state["dist_to_opt_avg"] * beta \
        + (1 - beta) * global_state["grad_norm_avg"] / (global_state['grad_norm_squared_avg'] + eps)

    if self._zero_debias:
        debias_factor = self.zero_debias_factor()
        self._dist_to_opt = global_state["dist_to_opt_avg"] / debias_factor
    else:
        self._dist_to_opt = global_state["dist_to_opt_avg"]

    if self._sparsity_debias:
        self._dist_to_opt /= (np.sqrt(self._sparsity_avg) + eps)
    return
def lr_grad_norm_avg(self):
    # this is for enforcing lr * grad_norm not
    # increasing dramatically in case of instability.
    # Not necessary for basic use.
    global_state = self._global_state
    beta = self._beta
    if "lr_grad_norm_avg" not in global_state:
        global_state['grad_norm_squared_avg_log'] = 0.0
    global_state['grad_norm_squared_avg_log'] = \
        global_state['grad_norm_squared_avg_log'] * beta \
        + (1 - beta) * np.log(global_state['grad_norm_squared'] + eps)
    if "lr_grad_norm_avg" not in global_state:
        global_state["lr_grad_norm_avg"] = \
            0.0 * beta + (1 - beta) * np.log(self._lr * np.sqrt(global_state['grad_norm_squared']) + eps)
        # we monitor the minimal smoothed ||lr * grad||
        global_state["lr_grad_norm_avg_min"] = \
            np.exp(global_state["lr_grad_norm_avg"] / self.zero_debias_factor())
    else:
        global_state["lr_grad_norm_avg"] = \
            global_state["lr_grad_norm_avg"] * beta \
            + (1 - beta) * np.log(self._lr * np.sqrt(global_state['grad_norm_squared']) + eps)
        global_state["lr_grad_norm_avg_min"] = \
            min(global_state["lr_grad_norm_avg_min"],
                np.exp(global_state["lr_grad_norm_avg"] / self.zero_debias_factor()))
def get_cubic_root(self):
    # We have the equation x^2 D^2 + (1-x)^4 * C / h_min^2
    # where x = sqrt(mu).
    # We substitute x, which is sqrt(mu), with x = y + 1.
    # It gives y^3 + py = q
    # where p = (D^2 h_min^2)/(2*C) and q = -p.
    # We use Vieta's substitution to compute the root.
    # There is only one real solution y (which is in [0, 1]).
    # http://mathworld.wolfram.com/VietasSubstitution.html
    # eps in the numerator is to prevent momentum = 1 in case of zero gradient
    p = (self._dist_to_opt + eps)**2 * (self._h_min + eps)**2 / 2 / (self._grad_var + eps)
    w3 = (-math.sqrt(p**2 + 4.0 / 27.0 * p**3) - p) / 2.0
    w = math.copysign(1.0, w3) * math.pow(math.fabs(w3), 1.0/3.0)
    y = w - p / 3.0 / (w + eps)
    x = y + 1

    if DEBUG:
        logging.debug("p %f, den %f", p, self._grad_var + eps)
        logging.debug("w3 %f ", w3)
        logging.debug("y %f, den %f", y, w + eps)
    return x
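A standalone check of the closed form used above, with eps dropped and a hypothetical value of p (in the optimizer, p is always positive because every factor is squared and offset by eps). Vieta's substitution y = w - p/(3w) turns y³ + py = q into a quadratic in w³; with q = -p, the root below should satisfy the cubic up to rounding:

import math

p = 0.7  # hypothetical positive value
w3 = (-math.sqrt(p**2 + 4.0 / 27.0 * p**3) - p) / 2.0
w = math.copysign(1.0, w3) * math.pow(math.fabs(w3), 1.0 / 3.0)
y = w - p / (3.0 * w)
print(y**3 + p * y + p)  # ~0.0: y solves y^3 + p*y = -p
print((y + 1)**2)        # mu, since x = y + 1 = sqrt(mu)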
def update_hyper_param(self):
    for group in self._optimizer.param_groups:
        group['momentum'] = self._mu
        if not self._force_non_inc_step:
            group['lr'] = min(self._lr * self._lr_factor,
                              self._lr_grad_norm_thresh
                              / (math.sqrt(self._global_state["grad_norm_squared"]) + eps))
        elif self._iter > self._curv_win_width:
            # force to guarantee lr * grad_norm not increasing dramatically.
            # Not necessary for basic use. Please refer to the comments
            # in YFOptimizer.__init__ for more details
            self.lr_grad_norm_avg()
            debias_factor = self.zero_debias_factor()
            group['lr'] = min(self._lr * self._lr_factor,
                              2.0 * self._global_state["lr_grad_norm_avg_min"]
                              / (np.sqrt(np.exp(self._global_state['grad_norm_squared_avg_log'] / debias_factor)) + eps))
    return
def lyr_linear(
        self, name_,
        s_x_, idim_, odim_,
        init_=None, bias_=0., params_di_='params'):
    '''
    dense matrix multiplication, optionally adding a bias vector
    '''
    name_W = name_ + '_w'
    name_B = name_ + '_b'
    self.set_vars(params_di_)
    if init_ is None:
        init_ = dict(init_=[1.4/sqrt(idim_+odim_)])
    v_W = self.get_variable(name_W, (idim_, odim_), **init_)
    if bias_ is None:
        s_ret = T.dot(s_x_, v_W)
    else:
        v_B = self.get_variable(name_B, (odim_,), bias_)
        s_ret = T.dot(s_x_, v_W) + v_B
    return s_ret
def rasterMaskToGrid(rasterMask):
    grid = []
    mask = rasterMask['mask']
    for y in range(rasterMask['height']):
        for x in range(rasterMask['width']):
            if mask[y, x] == 0:
                grid.append([x, y])

    grid = np.array(grid, dtype=float)  # np.float is deprecated; use the builtin
    if rasterMask is not None and rasterMask['hex'] is True:
        f = math.sqrt(3.0) / 2.0
        offset = -0.5
        if np.argmin(rasterMask['mask'][0]) > np.argmin(rasterMask['mask'][1]):
            offset = 0.5
        for i in range(len(grid)):
            if grid[i][1] % 2.0 == 0.0:
                grid[i][0] -= offset
            grid[i][1] *= f
    return grid
def getBestCircularMatch(n):
    bestc = n * 2
    bestr = 0
    bestrp = 0.0

    minr = int(math.sqrt(n / math.pi))
    for rp in range(0, 10):
        rpf = float(rp) / 10.0
        for r in range(minr, minr + 3):
            rlim = (r + rpf) * (r + rpf)
            c = 0
            for y in range(-r, r + 1):
                yy = y * y
                for x in range(-r, r + 1):
                    if x * x + yy < rlim:
                        c += 1
            if c == n:
                return r, rpf, c
            if c > n and c < bestc:
                bestrp = rpf
                bestr = r
                bestc = c
    return bestr, bestrp, bestc
def fit_transform(self, X, y=None):
    """Fit the model with X and apply the dimensionality reduction on X.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Training data, where n_samples is the number of samples
        and n_features is the number of features.

    Returns
    -------
    X_new : array-like, shape (n_samples, n_components)
    """
    U, S, V = self._fit(X)
    U = U[:, :int(self.n_components_)]

    if self.whiten:
        # X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples)
        U *= sqrt(X.shape[0])
    else:
        # X_new = X * V = U * S * V^T * V = U * S
        U *= S[:int(self.n_components_)]

    return U
def get_covariance(self):
    """Compute data covariance with the generative model.

    ``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
    where S**2 contains the explained variances.

    Returns
    -------
    cov : array, shape=(n_features, n_features)
        Estimated covariance of data.
    """
    components_ = self.components_
    exp_var = self.explained_variance_
    if self.whiten:
        components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
    exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
    cov = np.dot(components_.T * exp_var_diff, components_)
    cov.flat[::len(cov) + 1] += self.noise_variance_  # modify diag inplace
    return cov
def transform(self, X):
    """Apply the dimensionality reduction on X.

    X is projected on the first principal components previously extracted
    from a training set.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        New data, where n_samples is the number of samples
        and n_features is the number of features.

    Returns
    -------
    X_new : array-like, shape (n_samples, n_components)
    """
    check_is_fitted(self, 'mean_')

    X = check_array(X)
    if self.mean_ is not None:
        X = X - self.mean_
    X_transformed = np.dot(X, self.components_.T)
    if self.whiten:
        X_transformed /= np.sqrt(self.explained_variance_)
    return X_transformed
def create_model(self, model_input, vocab_size, l2_penalty=1e-8, **unused_params):
    """Creates an RBM model.

    Args:
      model_input: 'batch' x 'num_features' matrix of input features.
      vocab_size: The number of classes in the dataset.

    Returns:
      A dictionary with a tensor containing the probability predictions of the
      model in the 'predictions' key. The dimensions of the tensor are
      batch_size x num_classes.
    """
    input_size = vocab_size
    output_size = FLAGS.hidden_size
    with tf.name_scope("rbm"):
        self.weights = tf.Variable(
            tf.truncated_normal([input_size, output_size],
                                stddev=1.0 / math.sqrt(float(input_size))),
            name="weights")
        self.v_bias = tf.Variable(tf.zeros([input_size]), name="v_bias")
        self.h_bias = tf.Variable(tf.zeros([output_size]), name="h_bias")
        tf.add_to_collection(name=tf.GraphKeys.REGULARIZATION_LOSSES,
                             value=l2_penalty * tf.nn.l2_loss(self.weights))
        tf.add_to_collection(name=tf.GraphKeys.REGULARIZATION_LOSSES,
                             value=l2_penalty * tf.nn.l2_loss(self.v_bias))
        tf.add_to_collection(name=tf.GraphKeys.REGULARIZATION_LOSSES,
                             value=l2_penalty * tf.nn.l2_loss(self.h_bias))
def evalGradientParameter(self, x, mg):
    """
    Evaluate the gradient for the variational parameter equation at the point x=[u,a,p].

    Parameters:
    - x = [u,a,p] the point at which to evaluate the gradient.
    - mg the variational gradient (g, atest), where atest is a test function
      in the parameter space (output parameter).

    Returns the norm of the gradient in the correct inner product, g_norm = sqrt(g,g).
    """
    self.prior.grad(x[PARAMETER], mg)
    tmp = self.generate_vector(PARAMETER)
    self.problem.eval_da(x, tmp)
    mg.axpy(1., tmp)
    self.misfit.grad(PARAMETER, x, tmp)
    mg.axpy(1., tmp)

    self.prior.Msolver.solve(tmp, mg)
    #self.prior.Rsolver.solve(tmp, mg)
    return math.sqrt(mg.inner(tmp))
def information_ratio(algo_volatility, algorithm_return, benchmark_return):
    """
    http://en.wikipedia.org/wiki/Information_ratio

    Args:
        algorithm_returns (np.array-like):
            All returns during algorithm lifetime.
        benchmark_returns (np.array-like):
            All benchmark returns during algo lifetime.

    Returns:
        float. Information ratio.
    """
    if zp_math.tolerant_equals(algo_volatility, 0):
        return np.nan

    # The square of the annualization factor is in the volatility,
    # because the volatility is also annualized, i.e. the sqrt(annual factor)
    # is in the volatility's numerator. So to get the correct annualization
    # for the Sharpe value's numerator, which should be sqrt(annual factor),
    # the annual factor itself (the square of its square root) is needed in
    # the numerator to factor out the division by its square root.
    return (algorithm_return - benchmark_return) / algo_volatility
def _test_generator(n, func, args):
    # Python 2 variant of the benchmark helper above
    import time
    print n, 'times', func.__name__
    total = 0.0
    sqsum = 0.0
    smallest = 1e10
    largest = -1e10
    t0 = time.time()
    for i in range(n):
        x = func(*args)
        total += x
        sqsum = sqsum + x*x
        smallest = min(x, smallest)
        largest = max(x, largest)
    t1 = time.time()
    print round(t1-t0, 3), 'sec,',
    avg = total/n
    stddev = _sqrt(sqsum/n - avg*avg)
    print 'avg %g, stddev %g, min %g, max %g' % \
          (avg, stddev, smallest, largest)
def sim_distance(prefs, person1, person2):
    # build the dict of shared items
    si = {}
    for item in prefs[person1]:
        if item in prefs[person2]:
            si[item] = 1

    # if they have no items in common, return 0
    if len(si) == 0:
        return 0

    # add up the squares of the differences over the shared items
    sum_of_squares = sum([pow(prefs[person1][item] - prefs[person2][item], 2)
                          for item in si])
    return 1 / (1 + sqrt(sum_of_squares))
def pearson(v1, v2):
    sum1 = sum(v1)
    sum2 = sum(v2)
    sum1Sq = sum([pow(v, 2) for v in v1])
    sum2Sq = sum([pow(v, 2) for v in v2])

    # guard against unequal lengths, though the formula assumes len(v1) == len(v2)
    length = len(v1) if len(v1) < len(v2) else len(v2)
    pSum = sum([v1[i] * v2[i] for i in range(length)])

    numerator = pSum - sum1 * sum2 / len(v1)
    # the square root must cover both variance terms
    denominator = sqrt((sum1Sq - pow(sum1, 2) / len(v1)) *
                       (sum2Sq - pow(sum2, 2) / len(v1)))
    if denominator == 0:
        return 0
    return 1 - numerator / denominator
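Since the function returns 1 - r rather than the correlation r itself (a distance, as used in clustering code), identical inputs give 0.0 and perfectly anti-correlated inputs give 2.0. A quick check, assuming equal-length inputs as the formula does:

print(pearson([1, 2, 3, 4], [1, 2, 3, 4]))  # 0.0 (perfectly correlated)
print(pearson([1, 2, 3, 4], [4, 3, 2, 1]))  # 2.0 (perfectly anti-correlated)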
def approximateQuadraticArcLengthC(pt1, pt2, pt3):
    # Approximate length of quadratic Bezier curve using Gauss-Legendre
    # quadrature with n=3 points for complex points.
    #
    # This, essentially, approximates the length-of-derivative function
    # to be integrated with the best-matching fifth-degree polynomial
    # approximation of it.
    #
    # https://en.wikipedia.org/wiki/Gaussian_quadrature#Gauss.E2.80.93Legendre_quadrature

    # abs(BezierCurveC[2].diff(t).subs({t:T})) for T in sorted(.5, .5±sqrt(3/5)/2),
    # weighted 5/18, 8/18, 5/18 respectively.
    v0 = abs(-0.492943519233745*pt1 + 0.430331482911935*pt2 + 0.0626120363218102*pt3)
    v1 = abs(pt3 - pt1) * 0.4444444444444444
    v2 = abs(-0.0626120363218102*pt1 - 0.430331482911935*pt2 + 0.492943519233745*pt3)

    return v0 + v1 + v2
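A degenerate sanity check: if the three control points are collinear and evenly spaced, the curve is a straight segment, so the quadrature should return the chord length (the sample points below are assumed values; the C suffix means the points are complex numbers):

print(approximateQuadraticArcLengthC(0j, 0.5 + 0j, 1 + 0j))  # ~1.0, the chord length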
def __init__(self, dimension, randn=np.random.randn, debug=False):
    """pass dimension of the underlying sample space
    """
    try:
        self.N = len(dimension)
        std_vec = np.array(dimension, copy=True)
    except TypeError:
        self.N = dimension
        std_vec = np.ones(self.N)
    if self.N < 10:
        print('Warning: Not advised to use VD-CMA for dimension < 10.')
    self.randn = randn
    self.dvec = std_vec
    self.vvec = self.randn(self.N) / math.sqrt(self.N)
    self.norm_v2 = np.dot(self.vvec, self.vvec)
    self.norm_v = np.sqrt(self.norm_v2)
    self.vn = self.vvec / self.norm_v
    self.vnn = self.vn**2
    self.pc = np.zeros(self.N)
    self._debug = debug  # plot covariance matrix
def intersectCircle(self, other):
    "Find the intersection(s) of two circles as list of points"
    R = self.radius
    r = other.radius
    d = dist(self.pos, other.pos)
    if d > r + R or d == 0 or d < abs(r - R):
        return []
    r2 = r * r
    x = (d*d + r2 - R*R) / (2*d)
    ux, uy = delta(self.pos, other.pos, 1)
    x0, y0 = other.pos
    x0 += x * ux
    y0 += x * uy
    if x < r:
        y = sqrt(r2 - x*x)
        return [(x0 - y * uy, y0 + y * ux), (x0 + y * uy, y0 - y * ux)]
    else:
        return [(x0, y0)]
def eqn(self, x, n, size):
    "Calculate paint boundary"
    if not self.side:
        n = 1 - n
    w, h = size
    y = 0
    xc = 0
    for d in self.drops:
        r = d[0] * w / 2
        R = 1.1 * r
        xc += r
        dx = abs(x - xc)
        if dx <= R:
            dy = sqrt(R * R - dx * dx)
            Y = (h + R) * self.posn(n, *d[1:]) + dy - R
            if Y > y:
                y = Y
        xc += r
    return y, self.side
def matthews_correl_coeff(ntp, ntn, nfp, nfn):
    '''
    This calculates the Matthews correlation coefficient.

    https://en.wikipedia.org/wiki/Matthews_correlation_coefficient
    '''
    mcc_top = (ntp*ntn - nfp*nfn)
    mcc_bot = msqrt((ntp + nfp)*(ntp + nfn)*(ntn + nfp)*(ntn + nfn))

    if mcc_bot > 0:
        return mcc_top/mcc_bot
    else:
        return np.nan
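Two boundary cases make easy hand checks: a perfect classifier gives MCC = 1 and a fully inverted one gives -1 (this assumes msqrt is math.sqrt imported under that alias, as the prefix suggests):

print(matthews_correl_coeff(ntp=50, ntn=50, nfp=0, nfn=0))   # 1.0
print(matthews_correl_coeff(ntp=0, ntn=0, nfp=50, nfn=50))   # -1.0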
def combine_images(generated_images):
    total, width, height, ch = generated_images.shape
    cols = int(math.sqrt(total))
    rows = math.ceil(float(total) / cols)
    combined_image = np.zeros((height*rows, width*cols, 3),
                              dtype=generated_images.dtype)

    for index, image in enumerate(generated_images):
        i = int(index / cols)
        j = index % cols
        combined_image[width*i:width*(i+1), height*j:height*(j+1), :] = image
    return combined_image
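The grid is chosen as cols = floor(sqrt(total)) with enough rows to hold the remainder; note that the slicing uses width on axis 0 and height on axis 1, so the function effectively assumes square images. A small usage sketch with fabricated data:

import numpy as np

batch = np.random.rand(9, 8, 8, 3)   # nine fake 8x8 RGB images
grid_img = combine_images(batch)
print(grid_img.shape)                # (24, 24, 3): a 3x3 tile of 8x8 images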
def quaternion_matrix(quaternion):
    """Return homogeneous rotation matrix from quaternion.

    >>> M = quaternion_matrix([0.99810947, 0.06146124, 0, 0])
    >>> numpy.allclose(M, rotation_matrix(0.123, [1, 0, 0]))
    True
    >>> M = quaternion_matrix([1, 0, 0, 0])
    >>> numpy.allclose(M, numpy.identity(4))
    True
    >>> M = quaternion_matrix([0, 1, 0, 0])
    >>> numpy.allclose(M, numpy.diag([1, -1, -1, 1]))
    True

    """
    q = numpy.array(quaternion, dtype=numpy.float64, copy=True)
    n = numpy.dot(q, q)
    if n < _EPS:
        return numpy.identity(4)
    q *= math.sqrt(2.0 / n)
    q = numpy.outer(q, q)
    return numpy.array([
        [1.0-q[2, 2]-q[3, 3],     q[1, 2]-q[3, 0],     q[1, 3]+q[2, 0], 0.0],
        [    q[1, 2]+q[3, 0], 1.0-q[1, 1]-q[3, 3],     q[2, 3]-q[1, 0], 0.0],
        [    q[1, 3]-q[2, 0],     q[2, 3]+q[1, 0], 1.0-q[1, 1]-q[2, 2], 0.0],
        [                0.0,                 0.0,                 0.0, 1.0]])
def pdistance(self, p):
    """Perpendicular distance between this Segment and a given Point p"""
    if not isinstance(p, Point):
        return NotImplemented
    if self.start == self.end:
        # Distance from a Point to another Point is the length of a segment
        return Segment(self.start, p).length()

    s = self.end - self.start
    if s.x == 0:
        # Vertical Segment => pdistance is the difference of abscissa
        return abs(self.start.x - p.x)
    else:
        # That's the 2-D perpendicular distance formula (ref: Wikipedia)
        slope = s.y / s.x
        # intercept: crossing with the y-axis
        intercept = self.start.y - (slope * self.start.x)
        return abs(slope * p.x - p.y + intercept) / math.sqrt(slope ** 2 + 1)
def distance(ptsA, ptsB):
    """Function computing the Euclidean distance between two points.

    Can be 2D or 3D coordinates.

    Args:
       ptsA: (list/numpy.array) - 2D/3D coordinates of point A;
       ptsB: (list/numpy.array) - 2D/3D coordinates of point B;

    Returns:
       The Euclidean distance between points A & B.
    """
    if len(ptsA) != len(ptsB):
        warnings.warn("It seems that the points are not in the same space!")
        return None

    if len(ptsA) == 2:
        return math.sqrt((ptsA[0]-ptsB[0])**2 + (ptsA[1]-ptsB[1])**2)

    if len(ptsA) == 3:
        return math.sqrt((ptsA[0]-ptsB[0])**2 + (ptsA[1]-ptsB[1])**2 + (ptsA[2]-ptsB[2])**2)
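Usage is straightforward; the classic 3-4-5 right triangle makes an easy check, and mismatched dimensions warn and return None:

print(distance([0, 0], [3, 4]))        # 5.0
print(distance([0, 0, 0], [1, 2, 2]))  # 3.0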
def confidence_interval_dichotomous(
        point_estimate, sample_size, confidence=.95, bias=False,
        percentage=True, **kwargs):
    """Dichotomous confidence interval from sample size and maybe a bias"""
    alpha = ppf((confidence + 1) / 2, sample_size - 1)
    p = point_estimate
    if percentage:
        p /= 100
    margin = sqrt(p * (1 - p) / sample_size)
    if bias:
        margin += .5 / sample_size
    if percentage:
        margin *= 100
    return (point_estimate - alpha * margin, point_estimate + alpha * margin)
def _squares(self):
    n_series_ = len(self.series)
    i = 2

    if sqrt(n_series_).is_integer():
        _x = int(sqrt(n_series_))
        _y = int(sqrt(n_series_))
    else:
        while i * i < n_series_:
            while n_series_ % i == 0:
                n_series_ = n_series_ / i
            i = i + 1
        _y = int(n_series_)
        # the complementary factor of the grid, not n_series_ / len(self.series)
        # (which would always truncate to zero)
        _x = int(len(self.series) / n_series_)

    if len(self.series) == 5:
        _x, _y = 2, 3

    if abs(_x - _y) > 2:
        _sq = 3
        while (_x * _y) - 1 < len(self.series):
            _x, _y = _sq, _sq
            _sq += 1

    return (_x, _y)
def compute(obj, client, task=None):
    # obj is an instance of C
    import math
    # this task and client can use message passing
    print('process at %s received: %s' % (task.location, obj.n))
    yield task.sleep(obj.n)
    obj.n = math.sqrt(obj.n)
    # send result back to client
    yield client.deliver(obj, timeout=5)
def getDist(self, other):
    ox, oy = other.getCoords()
    xDist = self.x - ox
    yDist = self.y - oy
    return math.sqrt(xDist**2 + yDist**2)
def normalise_word_vectors(word_vectors, norm=1.0):
    """
    This method normalises the collection of word vectors provided in the
    word_vectors dictionary.
    """
    for word in word_vectors:
        word_vectors[word] /= math.sqrt((word_vectors[word]**2).sum() + 1e-6)
        word_vectors[word] = word_vectors[word] * norm
    return word_vectors
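A minimal usage sketch with a made-up two-word vocabulary; because of the 1e-6 stabiliser inside the square root, the resulting norms are only approximately equal to norm:

import numpy as np

vectors = {"cat": np.array([3.0, 4.0]), "dog": np.array([0.0, 2.0])}
vectors = normalise_word_vectors(vectors)
print(np.linalg.norm(vectors["cat"]))  # ~1.0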
def _init_gradients(self, vec_magnitude):
    """Initialize all gradient vectors to be in random directions with the same magnitude.

    Args:
        vec_magnitude (float): Magnitude of all gradient vectors.
    """
    self._grad_vecs = [[(0, 0) for _ in range(self._width_in_squares+1)]
                       for _ in range(self._length_in_squares+1)]
    """list[list[tuple(float, float)]]: Grid of gradient vectors."""
    for x in range(self._width_in_squares+1):
        for y in range(self._length_in_squares+1):
            x_val = (random.random() - 0.5) * 2 * vec_magnitude
            y_val = math.sqrt(vec_magnitude**2 - x_val**2) * random.choice([1, -1])
            self._grad_vecs[y][x] = (x_val, y_val)
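The construction above guarantees a fixed magnitude: x is drawn uniformly from [-m, m] and y = ±sqrt(m² - x²), so x² + y² = m² by design. A standalone check of that identity with an assumed magnitude:

import math
import random

m = 2.0  # assumed magnitude
x_val = (random.random() - 0.5) * 2 * m
y_val = math.sqrt(m**2 - x_val**2) * random.choice([1, -1])
print(math.hypot(x_val, y_val))  # 2.0, up to float rounding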
def weightVariable(shape, std=1.0, name=None):
    # Create a set of weights initialized with truncated normal random values
    name = 'weights' if name is None else name
    return tf.get_variable(name, shape,
                           initializer=tf.truncated_normal_initializer(stddev=std/math.sqrt(shape[0])))
def plotFields(layer, fieldShape=None, channel=None, figOffset=1, cmap=None, padding=0.01):
    # Receptive Fields Summary
    try:
        W = layer.W
    except:
        W = layer
    wp = W.eval().transpose()
    if len(np.shape(wp)) < 4:       # Fully connected layer, has no shape
        fields = np.reshape(wp, list(wp.shape[0:-1]) + fieldShape)
    else:                           # Convolutional layer already has shape
        features, channels, iy, ix = np.shape(wp)
        if channel is not None:
            fields = wp[:, channel, :, :]
        else:
            fields = np.reshape(wp, [features*channels, iy, ix])

    perRow = int(math.floor(math.sqrt(fields.shape[0])))
    perColumn = int(math.ceil(fields.shape[0]/float(perRow)))

    fig = mpl.figure(figOffset); mpl.clf()

    # Using image grid
    from mpl_toolkits.axes_grid1 import ImageGrid
    grid = ImageGrid(fig, 111, nrows_ncols=(perRow, perColumn), axes_pad=padding, cbar_mode='single')
    for i in range(0, np.shape(fields)[0]):
        im = grid[i].imshow(fields[i], cmap=cmap)

    grid.cbar_axes[0].colorbar(im)
    mpl.title('%s Receptive Fields' % layer.name)

    # old way
    # fields2 = np.vstack([fields,np.zeros([perRow*perColumn-fields.shape[0]] + list(fields.shape[1:]))])
    # tiled = []
    # for i in range(0,perColumn*perRow,perColumn):
    #     tiled.append(np.hstack(fields2[i:i+perColumn]))
    #
    # tiled = np.vstack(tiled)
    # mpl.figure(figOffset); mpl.clf(); mpl.imshow(tiled,cmap=cmap); mpl.title('%s Receptive Fields' % layer.name); mpl.colorbar()

    mpl.figure(figOffset+1); mpl.clf()
    mpl.imshow(np.sum(np.abs(fields), 0), cmap=cmap)
    mpl.title('%s Total Absolute Input Dependency' % layer.name)
    mpl.colorbar()
def plotOutput(layer, feed_dict, fieldShape=None, channel=None, figOffset=1, cmap=None):
    # Output summary
    try:
        W = layer.output
    except:
        W = layer
    wp = W.eval(feed_dict=feed_dict)
    if len(np.shape(wp)) < 4:       # Fully connected layer, has no shape
        temp = np.zeros(np.product(fieldShape))
        temp[0:np.shape(wp.ravel())[0]] = wp.ravel()
        fields = np.reshape(temp, [1] + fieldShape)
    else:                           # Convolutional layer already has shape
        wp = np.rollaxis(wp, 3, 0)
        features, channels, iy, ix = np.shape(wp)
        if channel is not None:
            fields = wp[:, channel, :, :]
        else:
            fields = np.reshape(wp, [features*channels, iy, ix])

    perRow = int(math.floor(math.sqrt(fields.shape[0])))
    perColumn = int(math.ceil(fields.shape[0]/float(perRow)))
    fields2 = np.vstack([fields, np.zeros([perRow*perColumn-fields.shape[0]] + list(fields.shape[1:]))])
    tiled = []
    for i in range(0, perColumn*perRow, perColumn):
        tiled.append(np.hstack(fields2[i:i+perColumn]))

    tiled = np.vstack(tiled)
    if figOffset is not None:
        mpl.figure(figOffset); mpl.clf()
        mpl.imshow(tiled, cmap=cmap)
        mpl.title('%s Output' % layer.name)
        mpl.colorbar()
def plotFields(layer, fieldShape=None, channel=None, maxFields=25, figName='ReceptiveFields', cmap=None, padding=0.01):
    # Receptive Fields Summary
    W = layer.W
    wp = W.eval().transpose()
    if len(np.shape(wp)) < 4:       # Fully connected layer, has no shape
        fields = np.reshape(wp, list(wp.shape[0:-1]) + fieldShape)
    else:                           # Convolutional layer already has shape
        features, channels, iy, ix = np.shape(wp)
        if channel is not None:
            fields = wp[:, channel, :, :]
        else:
            fields = np.reshape(wp, [features*channels, iy, ix])

    fieldsN = min(fields.shape[0], maxFields)
    perRow = int(math.floor(math.sqrt(fieldsN)))
    perColumn = int(math.ceil(fieldsN/float(perRow)))

    fig = mpl.figure(figName); mpl.clf()

    # Using image grid
    from mpl_toolkits.axes_grid1 import ImageGrid
    grid = ImageGrid(fig, 111, nrows_ncols=(perRow, perColumn), axes_pad=padding, cbar_mode='single')
    for i in range(0, fieldsN):
        im = grid[i].imshow(fields[i], cmap=cmap)

    grid.cbar_axes[0].colorbar(im)
    mpl.title('%s Receptive Fields' % layer.name)

    # old way
    # fields2 = np.vstack([fields,np.zeros([perRow*perColumn-fields.shape[0]] + list(fields.shape[1:]))])
    # tiled = []
    # for i in range(0,perColumn*perRow,perColumn):
    #     tiled.append(np.hstack(fields2[i:i+perColumn]))
    #
    # tiled = np.vstack(tiled)
    # mpl.figure(figOffset); mpl.clf(); mpl.imshow(tiled,cmap=cmap); mpl.title('%s Receptive Fields' % layer.name); mpl.colorbar()

    mpl.figure(figName+' Total'); mpl.clf()
    mpl.imshow(np.sum(np.abs(fields), 0), cmap=cmap)
    mpl.title('%s Total Absolute Input Dependency' % layer.name)
    mpl.colorbar()
def dist(p1, p2):
    return math.sqrt((p2[0] - p1[0]) ** 2 + (p2[1] - p1[1]) ** 2)
def dist(self, p1, p2):
    return math.sqrt((p2[0] - p1[0]) ** 2 + (p2[1] - p1[1]) ** 2)
def declination(self):
    """Calculate declination of vector in degrees"""
    return math.degrees(math.atan2(math.sqrt(self.x ** 2 + self.y ** 2), self.z))
def points_for_floor_split(self):
    """Calculate Poisson-disc distributed points for stem start points"""
    array = []

    # calculate approx spacing radius for dummy stem
    self.tree_scale = self.param.g_scale + self.param.g_scale_v
    stem = Stem(0, None)
    stem.length = self.calc_stem_length(stem)
    rad = 2.5 * self.calc_stem_radius(stem)

    # generate points
    for _ in range(self.param.floor_splits + 1):
        point_ok = False
        while not point_ok:
            # distance from center proportional to number of splits, tree scale and stem radius
            dis = sqrt(rand_in_range(0, 1) * self.param.floor_splits / 2.5 * self.param.g_scale * self.param.ratio)
            # angle random in circle
            theta = rand_in_range(0, 2 * pi)
            pos = Vector([dis * cos(theta), dis * sin(theta), 0])
            # test point against those already in array to ensure it will not intersect
            point_m_ok = True
            for point in array:
                if (point[0] - pos).magnitude < rad:
                    point_m_ok = False
                    break
            if point_m_ok:
                point_ok = True
                array.append((pos, theta))
    return array
def radius_at_offset(self, stem, z_1):
    """ calculate radius of stem at offset z_1 along it """
    n_taper = self.param.taper[stem.depth]

    if n_taper < 1:
        unit_taper = n_taper
    elif n_taper < 2:
        unit_taper = 2 - n_taper
    else:
        unit_taper = 0
    taper = stem.radius * (1 - unit_taper * z_1)

    if n_taper < 1:
        radius = taper
    else:
        z_2 = (1 - z_1) * stem.length
        if n_taper < 2 or z_2 < taper:
            depth = 1
        else:
            depth = n_taper - 2
        if n_taper < 2:
            z_3 = z_2
        else:
            z_3 = abs(z_2 - 2 * taper * int(z_2 / (2 * taper) + 0.5))
        if n_taper < 2 and z_3 >= taper:
            radius = taper
        else:
            radius = (1 - depth) * taper + depth * sqrt(pow(taper, 2) - pow((z_3 - taper), 2))

    if stem.depth == 0:
        y_val = max(0, 1 - 8 * z_1)
        flare = self.param.flare * ((pow(100, y_val) - 1) / 100) + 1
        radius *= flare
    return radius