We have extracted the following 50 code examples from open-source Python projects to illustrate how to use math.fsum().
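Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of why math.fsum() is typically preferred over the built-in sum() for long lists of floats: it tracks exact partial sums internally, so the result does not accumulate rounding error.

import math

# The built-in sum() accumulates rounding error on each addition;
# math.fsum() tracks exact partial sums and returns the correctly
# rounded total.
values = [0.1] * 10
print(sum(values))        # 0.9999999999999999
print(math.fsum(values))  # 1.0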
def _get_magnification_w_times(self, source_x, source_y, radius,
                               magnification_center=None):
    """Evaluates Gould (2008) eq. 8"""
    shift = radius / sqrt(2.)
    dx = [1., -1., -1., 1.]
    dy = [1., 1., -1., -1.]
    out = []
    for (i, dxval) in enumerate(dx):
        x = source_x + dxval * shift
        y = source_y + dy[i] * shift
        out.append(self.point_source_magnification(
            source_x=x, source_y=y))

    if magnification_center is None:
        magnification_center = self.point_source_magnification(
            source_x=source_x, source_y=source_y)
    return 0.25 * fsum(out) - magnification_center
def q(self, new_q):
    # Update epsilon
    new_q = np.insert(new_q, 0, 1.)
    self._epsilon = new_q / fsum(new_q)
    try:
        if np.array(new_q).size == self._epsilon.size - 1:
            # Case 3: the entire lens is defined (new_q changes
            # the values of q)
            pass
        else:
            # Case 2: the primary is defined (new_q adds masses)
            if ((self._total_mass is not None) and
                    (self._last_mass_set != 'total_mass')):
                self._total_mass = self._total_mass * fsum(new_q)
    except AttributeError:
        # Case 1: nothing is initialized (new_q directly sets epsilon)
        pass
def gamma(z, sqrt2pi=(2.0 * pi) ** 0.5):
    # Reflection to right half of complex plane
    if z < 0.5:
        return pi / sin(pi * z) / gamma(1.0 - z)
    # Lanczos approximation with g=7
    az = z + (7.0 - 0.5)
    return az ** (z - 0.5) / exp(az) * sqrt2pi * fsum([
        0.9999999999995183,
        676.5203681218835 / z,
        -1259.139216722289 / (z + 1.0),
        771.3234287757674 / (z + 2.0),
        -176.6150291498386 / (z + 3.0),
        12.50734324009056 / (z + 4.0),
        -0.1385710331296526 / (z + 5.0),
        0.9934937113930748e-05 / (z + 6.0),
        0.1659470187408462e-06 / (z + 7.0),
    ])
def pooled_sample_variance(sample1, sample2):
    """Find the pooled sample variance for two samples.

    Args:
        sample1: one sample.
        sample2: the other sample.

    Returns:
        Pooled sample variance, as a float.
    """
    deg_freedom = len(sample1) + len(sample2) - 2
    mean1 = statistics.mean(sample1)
    squares1 = ((x - mean1) ** 2 for x in sample1)
    mean2 = statistics.mean(sample2)
    squares2 = ((x - mean2) ** 2 for x in sample2)

    return (math.fsum(squares1) + math.fsum(squares2)) / float(deg_freedom)
def trapz(funct, args, a, b):
    N = 100                       # number of steps
    step = (b - a) / N            # step size
    y = []                        # initialize a list of values of y
    # Loop through the interior x values, omitting the first and last points
    for i in range(1, N):
        x = a + (step * i)        # each subsequent x value is increased by the step size
        y.append(funct(x, args))  # call the desired function with the required arguments and x value
    mid = math.fsum(y)            # sum the values of y
    # Find the area under the curve with the trapezoid rule for numerical
    # integration: the average of the endpoint values plus the interior sum
    area = step * ((funct(a, args) + funct(b, args)) / 2 + mid)
    return area                   # return the value of area back to the calling function
def sample(self, probs, temperature):
    if temperature == 0:
        return np.argmax(probs)

    probs = probs.astype(np.float64)  # convert to float64 for higher precision
    probs = np.log(probs) / temperature
    probs = np.exp(probs) / math.fsum(np.exp(probs))
    return np.argmax(np.random.multinomial(1, probs, 1))

# generate a sentence given conv_hidden
def mean(data: Iterable[float]) -> float:
    'Accurate arithmetic mean'
    data = list(data)
    return fsum(data) / len(data)
def dist(p: Point, q: Point, sqrt=sqrt, fsum=fsum, zip=zip) -> float:
    'Multi-dimensional euclidean distance'
    return sqrt(fsum((x1 - x2) ** 2.0 for x1, x2 in zip(p, q)))
def describe(data):
    'Simple reducer for descriptive statistics'
    n = len(data)
    lo = min(data)
    hi = max(data)
    mean = fsum(data) / n
    std_dev = (fsum((x - mean) ** 2 for x in data) / n) ** 0.5
    return Summary(n, lo, mean, hi, std_dev)
def get_chi2(self, fit_blending=None):
    """
    Calculates chi^2 of current model by fitting for source and
    blending fluxes.

    Parameters :
        fit_blending: *boolean*, optional
            If True, then the blend flux is a free parameter. If False,
            the blend flux is fixed at zero. Default is the same as
            :py:func:`MulensModel.fit.Fit.fit_fluxes()`.

    Returns :
        chi2: *float*
            Chi^2 value
    """
    chi2_per_point = self.get_chi2_per_point(
        fit_blending=fit_blending)
    # Calculate chi^2 given the fit
    chi2 = []
    for i, dataset in enumerate(self.datasets):
        # Calculate chi2 for the dataset excluding bad data
        select = np.logical_not(dataset.bad)
        chi2.append(fsum(chi2_per_point[i][select]))

    self.chi2 = fsum(chi2)
    if self.best_chi2 is None or self.best_chi2 > self.chi2:
        self.best_chi2 = self.chi2
        self.best_chi2_parameters = dict(self.model.parameters.parameters)
    return self.chi2
def _point_source_WM95(self, source_x, source_y):
    """calculate point source magnification using Witt & Mao 1995"""
    return fsum(abs(self._signed_magnification_WM95(
        source_x=source_x, source_y=source_y)))
def _get_magnification_w_plus(self, source_x, source_y, radius,
                              magnification_center=None):
    """Evaluates Gould (2008) eq. 7"""
    dx = [1., 0., -1., 0.]
    dy = [0., 1., 0., -1.]
    out = []
    for (i, dxval) in enumerate(dx):
        x = source_x + dxval * radius
        y = source_y + dy[i] * radius
        out.append(self.point_source_magnification(
            source_x=x, source_y=y))

    if magnification_center is None:
        magnification_center = self.point_source_magnification(
            source_x=source_x, source_y=source_y)
    return 0.25 * fsum(out) - magnification_center
def bleu(candidate, references, weights):
    """
    Calculate BLEU for a single sentence, comment by atma
    The result of this code is same as the most popular perl script
    eg:
        weight = [0.25, 0.25, 0.25, 0.25]
        can = 'It is a guide to action which ensures that the military always obeys the commands of the party'.lower().split()
        ref1 = 'It is a guide to action that ensures that the military will forever heed Party commands'.lower().split()
        ref2 = 'It is the guiding principle which guarantees the military forces always being under the command of the Party'.lower().split()
        ref = [ref1, ref2]
        print bleu(can, ref, weight)
    :param candidate: word list of one sentence, eg: ['I', 'like', 'eat', 'apple']
    :param references: list of ref, each is a list of word, eg [['I', 'like', 'eat', 'apple'], ['I', 'like', 'apple']]
    :param weights: a list of weight
    :return: return the bleu score
    """
    p_ns = (
        MP(candidate, references, i)
        for i, _ in enumerate(weights, start=1))
    s = []
    for w, p_n in zip(weights, p_ns):
        try:
            s.append(w * math.log(p_n))
        except ValueError:
            s.append(0)
    s = math.fsum(s)
    bp = BP(candidate, references)
    return bp * math.exp(s)
def calculateCentroid(self):
    if len(self.points) > 0:
        # Finds a virtual center point for a group of n-dimensional points
        numPoints = len(self.points)
        # Get a list of all coordinates in this cluster
        coords = [p.coords for p in self.points]
        print('cluster has: ' + str(numPoints) + ' point')
        # Reformat that so all x's are together, all y's etc.
        unzipped = zip(*coords)
        # Calculate the mean for each dimension
        centroid_coords = [math.fsum(dList) / numPoints for dList in unzipped]
        return Point(centroid_coords, 'Centroid')
    else:
        return self.centroid
def calc_mean_onbit_density(bitsets, number_of_bits):
    """Calculate the mean density of bits that are on in bitsets collection.

    Args:
        bitsets (list[intbitset.intbitset]): List of fingerprints
        number_of_bits: Number of bits for all fingerprints

    Returns:
        float: Mean on bit density
    """
    all_nr_onbits = [len(v) for v in bitsets]
    mean_onbit = fsum(all_nr_onbits) / float(len(all_nr_onbits))
    density = mean_onbit / number_of_bits
    return float(density)
def _get_weights(pts):
    '''Given a number of points in [-1, 1], according to

        On some Gauss and Lobatto based integration formulae,
        T. N. L. Patterson,
        Math. Comp. 22 (1968), 877-881,

    one can compute the corresponding weights. One reads there:

    > Thus the weights of an n-point integration formula [...] are given by
    >
    >     omega_i = int_{-1}^{1} L_i(x) dx,
    >
    > (where L_i is the Lagrange polynomial for the point x_i).
    > These weights can be evaluated exactly in a numerically stable fashion
    > using a Gauss formula with n/2 points when n is even and (n + 1)/2
    > points when n is odd.
    '''
    n = len(pts)

    # Unnormalized Lagrange polynomial: Degree n, 0 at all x_j except x_i.
    def L(i, x):
        return numpy.prod([(x - pts[j]) for j in range(n) if j != i], axis=0)

    # Gauss-Legendre of order k integrates polynomials of degree 2*k-1
    # exactly. L has degree n-1, so k needs to be n/2 if n is even, and
    # (n+1)/2 if n is odd.
    k = (n // 2) - 1 if n % 2 == 0 else (n + 1) // 2
    return numpy.array([
        integrate(
            lambda x, i=i: L(i, x[0]),
            numpy.array([[-1.0], [1.0]]),
            GaussLegendre(k),
            sumfun=lambda a: numpy.array([math.fsum(a)])
            )[0]
        / numpy.prod([(pts[i] - pts[j]) for j in range(n) if j != i])
        for i in range(n)
        ])
def plot_disks_1d(plt, pts, weights, total_area):
    '''Plot circles at the quadrature points according to the weights. The
    diameters sum up to the total area.
    '''
    radii = 0.5 * abs(weights) / math.fsum(weights) * total_area
    colors = [
        # use matplotlib 2.0's color scheme
        '#1f77b4' if weight >= 0 else '#d62728'
        for weight in weights
        ]
    _plot_disks_helpers(plt, pts, radii, colors)
    return
def plot_disks(plt, pts, weights, total_area):
    '''Plot circles at the quadrature points according to the weights.
    '''
    flt = numpy.vectorize(float)
    pts = flt(pts)
    weights = flt(weights)
    radii = numpy.sqrt(abs(weights) / math.fsum(weights) * total_area / math.pi)
    colors = [
        # use matplotlib 2.0's color scheme
        '#1f77b4' if weight >= 0 else '#d62728'
        for weight in weights
        ]
    _plot_disks_helpers(plt, pts, radii, colors)
    return
def best_dir(self):
    right = math.fsum(self.data[0:len(self.data)/2])
    left = math.fsum(self.data[len(self.data)/2:])
    return 0 if left == right else 1 if left > right else -1
def average(self):
    return math.fsum(self.timings) / len(self.timings)
def stdev(self):
    mean = self.average
    return (math.fsum([(x - mean) ** 2 for x in self.timings])
            / len(self.timings)) ** 0.5
def run(
        mesh, volume, convol_norms, ce_ratio_norms, cellvol_norms,
        tol=1.0e-12
        ):
    # Check cell volumes.
    total_cellvolume = fsum(mesh.cell_volumes)
    assert abs(volume - total_cellvolume) < tol * volume
    norm2 = numpy.linalg.norm(mesh.cell_volumes, ord=2)
    norm_inf = numpy.linalg.norm(mesh.cell_volumes, ord=numpy.Inf)
    assert near_equal(cellvol_norms, [norm2, norm_inf], tol)

    # If everything is Delaunay and the boundary elements aren't flat, the
    # volume of the domain is given by
    #   1/n * edge_lengths * ce_ratios.
    # Unfortunately, this isn't always the case.
    # ```
    # total_ce_ratio = \
    #     fsum(mesh.edge_lengths**2 * mesh.get_ce_ratios_per_edge() / dim)
    # self.assertAlmostEqual(volume, total_ce_ratio, delta=tol * volume)
    # ```
    # Check ce_ratio norms.
    # TODO reinstate
    alpha2 = fsum((mesh.get_ce_ratios()**2).flat)
    alpha_inf = max(abs(mesh.get_ce_ratios()).flat)
    assert near_equal(ce_ratio_norms, [alpha2, alpha_inf], tol)

    # Check the volume by summing over the absolute value of the control
    # volumes.
    vol = fsum(mesh.get_control_volumes())
    assert abs(volume - vol) < tol * volume

    # Check control volume norms.
    norm2 = numpy.linalg.norm(mesh.get_control_volumes(), ord=2)
    norm_inf = numpy.linalg.norm(mesh.get_control_volumes(), ord=numpy.Inf)
    assert near_equal(convol_norms, [norm2, norm_inf], tol)

    return
def test_toy_geometric():
    filename = download_mesh(
        'toy.msh',
        '1d125d3fa9f373823edd91ebae5f7a81'
        )
    mesh, _, _, _ = voropy.read(filename)

    mesh = voropy.mesh_tetra.MeshTetra(
        mesh.node_coords,
        mesh.cells['nodes'],
        mode='geometric'
        )

    run(
        mesh,
        volume=9.3875504672601107,
        convol_norms=[0.20175742659663737, 0.0093164692200450819],
        ce_ratio_norms=[13.497977312281323, 0.42980191511570004],
        cellvol_norms=[0.091903119589148916, 0.0019959463063558944],
        tol=1.0e-6
        )

    cc = mesh.get_cell_circumcenters()
    cc_norm_2 = fsum(cc.flat)
    cc_norm_inf = max(cc.flat)
    assert abs(cc_norm_2 - 1103.7038287583791) < 1.0e-12
    assert abs(cc_norm_inf - 3.4234008596539662) < 1.0e-12
    return
def calc_arr_norm(v):
    mag = sqrt(fsum([c**2 for c in v]))
    return tuple([(c / mag) for c in v])

################################################################################
## DEFINE THE FUNCTION FOR COMPUTING DOT PRODUCT
################################################################################

# Define the function for computing dot product
def calc_arr_dot(u, v):
    if len(u) != len(v):
        raise TypeError('Unequal lengths.')
    return fsum([x[0] * x[1] for x in zip(u, v)])
def softmax(x):
    y = [math.exp(k) for k in x]
    sum_y = math.fsum(y)
    z = [k / sum_y for k in y]
    return z
def fit(self, data):
    self.data = data
    self.real_indices = range(len(data))
    for i in range(len(data)):
        self.dic[(i, i)] = 0.
        for j in range(i):
            self.dic[(i, j)] = math.sqrt(math.fsum(
                ((a - b)**2 for a, b in zip(self.data[i], self.data[j]))))
            self.dic[(j, i)] = self.dic[(i, j)]
def mean_neg_log_likelihood(self):
    # np.sum() has some precision problems here
    return math.fsum([self.neg_sum_batch_log_likelihood(i)
                      for i in xrange(self.num_batches)]) / self.num_samples
def mean_unnormalized_neg_log_likelihood(self):
    # np.sum() has some precision problems here
    return math.fsum([self.unnormalized_neg_sum_batch_log_likelihood(i)
                      for i in xrange(self.num_batches)]) / self.num_samples
def eval_symb_reg(individual, points, values):
    try:
        func = toolbox.compile(expr=individual)
        sqerrors = [(func(*z) - valx)**2 for z, valx in zip(points, values)]
        return math.log10(math.sqrt(math.fsum(sqerrors)) / len(points)),
    except OverflowError:
        return 1000.0,

# register the selection and genetic operators - tournament selection,
# one-point crossover and sub-tree mutation
def lf_needed_fuel(dv, I_sp, m_p, f_e):
    m_c = m_p/f_e * ((1/f_e) / (1 + (1/f_e) - exp(
        1/g_0 * fsum([dv[i]/I_sp[i] for i in range(len(dv))]))) - 1)
    if m_c < 0:
        return None
    return m_c
def _get_duration(self):
    duration = self._metadata.get('duration', None)
    if duration is not None:
        return duration
    raw_values = self._get_raw_values(warmups=True)
    return math.fsum(raw_values)
def get_total_duration(self):
    durations = [run._get_duration() for run in self._runs]
    return math.fsum(durations)
def _get_run_property(self, get_property):
    # ignore calibration runs
    values = [get_property(run) for run in self._runs
              if not run._is_calibration()]
    if len(set(values)) == 1:
        return values[0]

    # Compute the mean (float)
    return math.fsum(values) / len(values)
def get_total_duration(self):
    durations = [benchmark.get_total_duration() for benchmark in self]
    return math.fsum(durations)
def test_compare_with_math_fsum(self):
    # Compare with the math.fsum function.
    # Ideally we ought to get the exact same result, but sometimes
    # we differ by a very slight amount :-(
    data = [random.uniform(-100, 1000) for _ in range(1000)]
    self.assertApproxEqual(float(self.func(data)[1]), math.fsum(data), rel=2e-16)
def eval_func(individual, points):
    # Transform the tree expression in a callable function
    func = toolbox.compile(expr=individual)

    # Evaluate the mean squared error
    mse = ((func(x) - (2 * x**3 - 3 * x**2 + 4 * x - 1))**2 for x in points)
    return math.fsum(mse) / len(points),

# Function to create the toolbox
def get_average(self, mount):
    if not isinstance(mount, int):
        raise ValueError("Mount must be an integer")
    average = [0, 0, 0, 0, 0]
    lt_list = [[], [], [], [], []]
    for times in range(0, mount):
        lt = self.read_analog()
        for lt_id in range(0, 5):
            lt_list[lt_id].append(lt[lt_id])
    for lt_id in range(0, 5):
        average[lt_id] = int(math.fsum(lt_list[lt_id]) / mount)
    return average
def test_compare_with_math_fsum(self):
    # Compare with the math.fsum function.
    # Ideally we ought to get the exact same result, but sometimes
    # we differ by a very slight amount :-(
    data = [random.uniform(-100, 1000) for _ in range(1000)]
    self.assertApproxEqual(self.func(data), math.fsum(data), rel=2e-16)
def max_flow(edges, source, sink):
    """Returns the maximum flow that can be routed from source to sink.

    Uses the push-relabel algorithm (also known as pre-flow push) to push
    flow to nodes, then divert any excess flow at the nodes to 'downhill'
    (lower labeled) nodes until the flow reaches sink.

    Args:
        edges: A list of directed edge tuples of the form
            (start, end, capacity), where start and end both represent nodes,
            and capacity represents the maximum capacity that can pass through
            this edge at once. start and end may be strings or numbers, and
            capacity must be a number.
        source, sink: Node names identifying the start (source) and end (sink)
            nodes of the paths. May be numbers or strings. If both names are
            not included in edges, the maximum flow will be 0.

    Returns:
        A floating point number indicating the maximum flow that can be routed
        from source to sink through edges.
    """
    flow, labels, outgoing_edges, incoming_edges = _initialize(edges, source)
    # Start with the nodes that are adjacent to source, since they can get flow
    excess_nodes = [edge[1] for edge in outgoing_edges[source]]
    while len(excess_nodes) > 0:
        current = excess_nodes.pop()
        pushed = _push(
            outgoing_edges[current], incoming_edges[current], labels, flow)
        if not (pushed or _relabel(outgoing_edges[current], labels)):
            # Try next node if nothing could be pushed or relabeled
            continue
        # Only check nodes with outgoing edges
        excess_nodes = [node for node in outgoing_edges if _excess(
            flow, outgoing_edges[node], incoming_edges[node]) > 0]
    # Use fsum for precision in case capacities are floats
    return math.fsum(flow[x] for x in incoming_edges[sink])
def test_clenshaw(tol=1.0e-14):
    n = 5
    _, _, alpha, beta = \
        orthopy.line.recurrence_coefficients.legendre(n, 'monic')
    t = 1.0

    a = numpy.ones(n + 1)
    value = orthopy.line.clenshaw(a, alpha, beta, t)

    ref = math.fsum([
        numpy.polyval(legendre(i, monic=True), t)
        for i in range(n + 1)])

    assert abs(value - ref) < tol
    return