The following 50 code examples, extracted from open source Python projects, illustrate how to use numpy.inner().
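As a quick refresher before the project code, here is a minimal sketch of numpy.inner itself (the array values are illustrative only, not taken from any project below): for 1-D arrays it is the ordinary dot product, and for higher-dimensional arrays it sums products over the last axis of both arguments.

import numpy as np

# 1-D case: plain dot product (no conjugation, unlike np.vdot).
a = np.array([1.0, 2.0, 3.0])
b = np.array([4.0, 5.0, 6.0])
print(np.inner(a, b))        # 32.0, same as np.dot(a, b)

# N-D case: sum over the LAST axis of both arguments, so for 2-D inputs
# np.inner(A, B)[i, j] == np.dot(A[i], B[j]), i.e. A @ B.T.
A = np.arange(6.0).reshape(2, 3)
B = np.arange(12.0).reshape(4, 3)
print(np.inner(A, B).shape)  # (2, 4)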
def vdot(a, b):
    """Returns the dot product of two vectors.

    The input arrays are flattened into 1-D vectors and then it performs inner
    product of these vectors.

    Args:
        a (cupy.ndarray): The first argument.
        b (cupy.ndarray): The second argument.

    Returns:
        cupy.ndarray: Zero-dimensional array of the dot product result.

    .. seealso:: :func:`numpy.vdot`

    """
    if a.size != b.size:
        raise ValueError('Axis dimension mismatch')
    if a.dtype.kind == 'c':
        a = a.conj()
    return core.tensordot_core(a, b, None, 1, 1, a.size, ())
def idot(arrays):
    """
    Yields the cumulative array inner product (dot product) of arrays.

    Parameters
    ----------
    arrays : iterable
        Arrays to be reduced.

    Yields
    ------
    online_dot : ndarray

    See Also
    --------
    numpy.linalg.multi_dot : Compute the dot product of two or more arrays
        in a single function call, while automatically selecting the fastest
        evaluation order.
    """
    yield from _ireduce_linalg(arrays, np.dot)
def itensordot(arrays, axes=2):
    """
    Yields the cumulative array inner product (dot product) of arrays.

    Parameters
    ----------
    arrays : iterable
        Arrays to be reduced.
    axes : int or (2,) array_like
        * integer_like: If an int N, sum over the last N axes of a
          and the first N axes of b in order. The sizes of the corresponding
          axes must match.
        * (2,) array_like: Or, a list of axes to be summed over, first sequence
          applying to a, second to b. Both elements array_like must be of the
          same length.

    Yields
    ------
    online_tensordot : ndarray

    See Also
    --------
    numpy.tensordot : Compute the tensordot on two tensors.
    """
    yield from _ireduce_linalg(arrays, np.tensordot, axes=axes)
def spherical_noise(gridData=None, order_max=8, spherical_harmonic_bases=None):
    '''Returns order-limited random weights on a spherical surface.

    Parameters
    ----------
    gridData : io.SphericalGrid
        SphericalGrid containing azimuth and colatitude
    order_max : int, optional
        Spherical order limit [Default: 8]

    Returns
    -------
    noisy_weights : array_like, complex
        Noisy weights
    '''
    if spherical_harmonic_bases is None:
        if gridData is None:
            raise TypeError('Either a grid or the spherical harmonic bases have to be provided.')
        gridData = SphericalGrid(*gridData)
        spherical_harmonic_bases = sph_harm_all(order_max, gridData.azimuth, gridData.colatitude)
    else:
        order_max = int(_np.sqrt(spherical_harmonic_bases.shape[1]) - 1)
    return _np.inner(spherical_harmonic_bases,
                     _np.random.randn((order_max + 1) ** 2)
                     + 1j * _np.random.randn((order_max + 1) ** 2))
def project_verteces(self, mesh, orientation):
    """Supplement the mesh array with scalars (max and median) for each face
    projected onto the orientation vector.

    Args:
        mesh (np.array): with format face_count x 6 x 3.
        orientation (np.array): with format 3 x 3.

    Returns:
        adjusted mesh.
    """
    mesh[:, 4, 0] = np.inner(mesh[:, 1, :], orientation)
    mesh[:, 4, 1] = np.inner(mesh[:, 2, :], orientation)
    mesh[:, 4, 2] = np.inner(mesh[:, 3, :], orientation)

    mesh[:, 5, 1] = np.max(mesh[:, 4, :], axis=1)
    mesh[:, 5, 2] = np.median(mesh[:, 4, :], axis=1)
    sleep(0)  # Yield, so other threads get a bit of breathing space.
    return mesh
def word_sim_test(filename, pos_vectors):
    delim = ','
    actual_sim_list, pred_sim_list = [], []
    missed = 0

    with open(filename, 'r') as pairs:
        for pair in pairs:
            w1, w2, actual_sim = pair.strip().split(delim)

            try:
                w1_vec = create_word_vector(w1, pos_vectors)
                w2_vec = create_word_vector(w2, pos_vectors)
                pred = float(np.inner(w1_vec, w2_vec))
                actual_sim_list.append(float(actual_sim))
                pred_sim_list.append(pred)
            except KeyError:
                missed += 1

    spearman, _ = st.spearmanr(actual_sim_list, pred_sim_list)
    pearson, _ = st.pearsonr(actual_sim_list, pred_sim_list)

    return spearman, pearson, missed
def test_einsum_misc(self):
    # This call used to crash because of a bug in
    # PyArray_AssignZero
    a = np.ones((1, 2))
    b = np.ones((2, 2, 1))
    assert_equal(np.einsum('ij...,j...->i...', a, b), [[[2], [2]]])

    # The iterator had an issue with buffering this reduction
    a = np.ones((5, 12, 4, 2, 3), np.int64)
    b = np.ones((5, 12, 11), np.int64)
    assert_equal(np.einsum('ijklm,ijn,ijn->', a, b, b),
                 np.einsum('ijklm,ijn->', a, b))

    # Issue #2027, was a problem in the contiguous 3-argument
    # inner loop implementation
    a = np.arange(1, 3)
    b = np.arange(1, 5).reshape(2, 2)
    c = np.arange(1, 9).reshape(4, 2)
    assert_equal(np.einsum('x,yx,zx->xzy', a, b, c),
                 [[[1, 3], [3, 9], [5, 15], [7, 21]],
                  [[8, 16], [16, 32], [24, 48], [32, 64]]])
def test_einsum_all_contig_non_contig_output(self):
    # Issue gh-5907, tests that the all contiguous special case
    # actually checks the contiguity of the output
    x = np.ones((5, 5))
    out = np.ones(10)[::2]
    correct_base = np.ones(10)
    correct_base[::2] = 5
    # Always worked (inner iteration is done with 0-stride):
    np.einsum('mi,mi,mi->m', x, x, x, out=out)
    assert_array_equal(out.base, correct_base)
    # Example 1:
    out = np.ones(10)[::2]
    np.einsum('im,im,im->m', x, x, x, out=out)
    assert_array_equal(out.base, correct_base)
    # Example 2, buffering causes x to be contiguous but
    # special cases do not catch the operation before:
    out = np.ones((2, 2, 2))[..., 0]
    correct_base = np.ones((2, 2, 2))
    correct_base[..., 0] = 2
    x = np.ones((2, 2), np.float32)
    np.einsum('ij,jk->ik', x, x, out=out)
    assert_array_equal(out.base, correct_base)
def test_TakeTransposeInnerOuter(self):
    # Test of take, transpose, inner, outer products
    x = arange(24)
    y = np.arange(24)
    x[5:6] = masked
    x = x.reshape(2, 3, 4)
    y = y.reshape(2, 3, 4)
    assert_equal(np.transpose(y, (2, 0, 1)), transpose(x, (2, 0, 1)))
    assert_equal(np.take(y, (2, 0, 1), 1), take(x, (2, 0, 1), 1))
    assert_equal(np.inner(filled(x, 0), filled(y, 0)), inner(x, y))
    assert_equal(np.outer(filled(x, 0), filled(y, 0)), outer(x, y))
    y = array(['abc', 1, 'def', 2, 3], object)
    y[2] = masked
    t = take(y, [0, 3, 4])
    assert_(t[0] == 'abc')
    assert_(t[1] == 2)
    assert_(t[2] == 3)
def test_4(self):
    """
    Test of take, transpose, inner, outer products.
    """
    x = self.arange(24)
    y = np.arange(24)
    x[5:6] = self.masked
    x = x.reshape(2, 3, 4)
    y = y.reshape(2, 3, 4)
    assert self.allequal(np.transpose(y, (2, 0, 1)), self.transpose(x, (2, 0, 1)))
    assert self.allequal(np.take(y, (2, 0, 1), 1), self.take(x, (2, 0, 1), 1))
    assert self.allequal(np.inner(self.filled(x, 0), self.filled(y, 0)),
                         self.inner(x, y))
    assert self.allequal(np.outer(self.filled(x, 0), self.filled(y, 0)),
                         self.outer(x, y))
    y = self.array(['abc', 1, 'def', 2, 3], object)
    y[2] = self.masked
    t = self.take(y, [0, 3, 4])
    assert t[0] == 'abc'
    assert t[1] == 2
    assert t[2] == 3
def inner(a, b):
    """
    Returns the inner product of a and b for arrays of floating point types.

    Like the generic NumPy equivalent the product sum is over the last dimension
    of a and b.

    Notes
    -----
    The first argument is not conjugated.

    """
    fa = filled(a, 0)
    fb = filled(b, 0)
    if len(fa.shape) == 0:
        fa.shape = (1,)
    if len(fb.shape) == 0:
        fb.shape = (1,)
    return np.inner(fa, fb).view(MaskedArray)
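The masked-array inner above fills masked entries with 0 before delegating to np.inner. A small usage sketch of the public numpy.ma.inner wrapper (values illustrative only):

import numpy.ma as ma

x = ma.array([1.0, 2.0, 3.0], mask=[False, True, False])
y = ma.array([4.0, 5.0, 6.0])
# The masked entry counts as 0: 1*4 + 0*5 + 3*6 = 22.0
print(ma.inner(x, y))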
def test__compute_probabilities_loosely(self):
    b = il.RoughlyOptimized(self.lists, sample_num=3)
    is_success, p, minimum = b._compute_probabilities(
        self.lists,
        self.rankings,
    )
    assert is_success
    self.assert_almost_equal(p[0], 0.0)
    self.assert_almost_equal(p[1], 0.0)
    self.assert_almost_equal(p[2], 1.0)
    self.assert_almost_equal(b._lambdas[0], 0.5 - 1.0/3)
    self.assert_almost_equal(b._lambdas[1], 0.5 - 1.0/3 + 1.0/3 - 0.0)
    self.assert_almost_equal(
        minimum,
        np.sum(b._lambdas) + np.inner(p, b._sigmas),
    )
    _, _, minimum = b._compute_probabilities_loosely(
        self.lists,
        self.rankings,
        bias_weight=10.0,
    )
    self.assert_almost_equal(
        minimum,
        10.0 * np.sum(b._lambdas) + np.inner(p, b._sigmas),
    )
def test__compute_probabilities(self):
    lists = [[1, 2], [2, 3]]
    b = il.Optimized(lists, sample_num=3)
    rankings = []
    r = CreditRanking(num_rankers=len(lists), contents=[1, 2])
    r.credits = {0: {1: 1.0, 2: 0.5}, 1: {1: 1.0/3, 2: 1.0}}
    rankings.append(r)
    r = CreditRanking(num_rankers=len(lists), contents=[2, 1])
    r.credits = {0: {1: 1.0, 2: 0.5}, 1: {1: 1.0/3, 2: 1.0}}
    rankings.append(r)
    r = CreditRanking(num_rankers=len(lists), contents=[2, 3])
    r.credits = {0: {2: 0.5, 3: 1.0/3}, 1: {2: 1.0, 3: 0.5}}
    rankings.append(r)
    is_success, p, minimum = b._compute_probabilities(lists, rankings)
    assert is_success
    assert (p >= 0).all()
    assert (p <= 1).all()
    assert minimum >= 0
    self.assert_almost_equal(np.sum(p), 1)
    self.assert_almost_equal(np.inner([1 - 1.0/3, -0.5, -0.5], p), 0)
    self.assert_almost_equal(np.inner([0.5 - 1.0/3, 0.5 - 1.0/3, -1 + 1.0/3], p), 0)
    self.assert_almost_equal(p[0], 0.4285714273469387)
    self.assert_almost_equal(p[1], 0.37142857025306114)
    self.assert_almost_equal(p[2], 0.20000000240000002)
def find_nearest_instance_thread(test_instance_start_index, test_instance_end_index):
    print(test_instance_start_index, test_instance_end_index)
    for test_instance_index in range(test_instance_start_index, test_instance_end_index):
        # find the nearest training instance by cosine similarity
        maximal_cosine_similarity = -1
        maximal_cosine_similarity_index = 0
        for training_instance, training_instance_index in zip(training_data, range(len(training_data))):
            # compute the cosine similarity:
            # first, compute the inner product
            inner_product = np.inner(test_data[test_instance_index][0].reshape(-1),
                                     training_instance[0].reshape(-1))
            # second, normalize the inner product
            normalized_inner_product = (inner_product
                                        / test_data_lengths[test_instance_index]
                                        / training_data_lengths[training_instance_index])
            if normalized_inner_product > maximal_cosine_similarity:
                maximal_cosine_similarity = normalized_inner_product
                maximal_cosine_similarity_index = training_instance_index
        classified_results[test_instance_index] = maximal_cosine_similarity_index
def find_nearest_instance_subprocess(test_instance_start_index, test_instance_end_index,
                                     classified_results):
    # print(test_instance_start_index, test_instance_end_index)
    for test_instance_index in range(test_instance_start_index, test_instance_end_index):
        # find the nearest training instance by cosine similarity
        maximal_cosine_similarity = -1.0
        maximal_cosine_similarity_index = 0
        for training_instance, training_instance_index in \
                zip(training_data_instances, range(len(training_data_instances))):
            # compute the cosine similarity:
            # first, compute the inner product
            inner_product = np.inner(test_data_instances[test_instance_index],
                                     training_instance)
            # second, normalize the inner product
            normalized_inner_product = (inner_product
                                        / test_data_lengths[test_instance_index]
                                        / training_data_lengths[training_instance_index])
            if normalized_inner_product > maximal_cosine_similarity:
                maximal_cosine_similarity = normalized_inner_product
                maximal_cosine_similarity_index = training_instance_index
        classified_results[test_instance_index] = \
            training_data_labels[int(maximal_cosine_similarity_index)]
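The two nearest-neighbour searches above loop over every (test, training) pair. Since np.inner on two 2-D arrays computes all pairwise inner products at once, the same cosine-similarity classification can be vectorized. A minimal sketch, assuming the instances are rows of 2-D float arrays; the names nearest_by_cosine, test_X, and train_X are hypothetical, not from the project above:

import numpy as np

def nearest_by_cosine(test_X, train_X, train_labels):
    # All pairwise inner products: products[i, j] = <test_X[i], train_X[j]>.
    products = np.inner(test_X, train_X)
    # Normalize by the vector lengths to get cosine similarities.
    test_norms = np.linalg.norm(test_X, axis=1)
    train_norms = np.linalg.norm(train_X, axis=1)
    cosine = products / test_norms[:, None] / train_norms[None, :]
    # Label of the most similar training instance for each test instance
    # (train_labels is assumed to be a 1-D array aligned with train_X rows).
    return train_labels[np.argmax(cosine, axis=1)]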
def calc_partial_factor_scores(Xscaled, Q, col_indices):
    """Projects individual scores onto the group-level component."""
    print("Calculating factor scores for datasets... ", end='')
    pfs = []
    for i, val in enumerate(col_indices):
        pfs.append(np.inner(Xscaled[:, val], Q[val, :].T))
    pfs = np.array(pfs)
    print("Done!")
    return pfs
def get_similar_vector(self, match_vector, match_type, num_similar, oversample, normalize):
    """Get similar items from an input vector."""
    if not match_vector:
        return []
    # search_k defaults to n * n_trees in Annoy - multiply by oversample
    # don't allow oversample to go below 1, this causes errors in Annoy
    if oversample < 1:
        oversample = 1
    search_k = int(num_similar * self._annoy_objects[match_type]._ntrees * oversample)
    similar_items = self._annoy_objects[match_type].get_nns_by_vector(
        match_vector, num_similar, search_k)
    # compute inner products, and sort
    scores = self.get_scores_vector(
        match_vector, match_type, similar_items, normalize)
    scores = sorted(scores, key=lambda k: k['score'], reverse=True)
    return scores[:num_similar]
def _get_Smatrices(self, X, y):
    Sb = np.zeros((X.shape[1], X.shape[1]))
    S = np.inner(X.T, X.T)
    N = len(X)
    mu = np.mean(X, axis=0)
    classLabels = np.unique(y)
    for label in classLabels:
        classIdx = np.argwhere(y == label).T[0]
        Nl = len(classIdx)
        xL = X[classIdx]
        muL = np.mean(xL, axis=0)
        muLbar = muL - mu
        Sb = Sb + Nl * np.outer(muLbar, muLbar)
    Sbar = S - N * np.outer(mu, mu)
    Sw = Sbar - Sb
    self.mean_ = mu
    return (Sw, Sb)
def convex_hull(points, vind, nind, tind, obj):
    "super inefficient brute force"
    cnt = len(points)
    for a in range(cnt):
        for b in range(a + 1, cnt):
            for c in range(b + 1, cnt):
                vec1 = points[a] - points[b]
                vec2 = points[a] - points[c]
                n = np.cross(vec1, vec2)
                n /= np.linalg.norm(n)
                C = np.dot(n, points[a])
                inner = np.inner(n, points)
                pos = (inner <= C + 0.0001).all()
                neg = (inner >= C - 0.0001).all()
                if not pos and not neg:
                    continue
                obj.out.write("f %i//%i %i//%i %i//%i\n" % (
                    (vind[a], nind[a], vind[b], nind[b], vind[c], nind[c])
                    if (inner - C).sum() < 0 else
                    (vind[a], nind[a], vind[c], nind[c], vind[b], nind[b])))
                # obj.out.write("f %i/%i/%i %i/%i/%i %i/%i/%i\n" % (
                #     (vind[a], tind[a], nind[a], vind[b], tind[b], nind[b], vind[c], tind[c], nind[c])
                #     if (inner - C).sum() < 0 else
                #     (vind[a], tind[a], nind[a], vind[c], tind[c], nind[c], vind[b], tind[b], nind[b])))
def vdot(a, b):
    """Returns the dot product of two vectors.

    The input arrays are flattened into 1-D vectors and then it performs inner
    product of these vectors.

    Args:
        a (cupy.ndarray): The first argument.
        b (cupy.ndarray): The second argument.

    Returns:
        cupy.ndarray: Zero-dimensional array of the dot product result.

    .. seealso:: :func:`numpy.vdot`

    """
    if a.size != b.size:
        raise ValueError('Axis dimension mismatch')
    return core.tensordot_core(a, b, None, 1, 1, a.size, ())
def transform(self, X):
    """
    Project the data so as to maximize class separation (large separation
    between projected class means and small variance within each class).

    Parameters
    ----------
    X : array-like, shape = [n_samples, n_features]

    Returns
    -------
    X_new : array, shape = [n_samples, n_components_found_]
    """
    # X = np.asarray(X)
    # ts = time.time()
    k = self._get_kernel(X, self.X_fit_)
    # if self.print_timing: print('KernelFisher.transform: k took', time.time() - ts)
    # ts = time.time()
    z = np.inner(self.Z, (k - self.K_mean)).T
    # if self.print_timing: print('KernelFisher.transform: z took', time.time() - ts)
    return z
def test_testTakeTransposeInnerOuter(self):
    # Test of take, transpose, inner, outer products
    x = arange(24)
    y = np.arange(24)
    x[5:6] = masked
    x = x.reshape(2, 3, 4)
    y = y.reshape(2, 3, 4)
    assert_(eq(np.transpose(y, (2, 0, 1)), transpose(x, (2, 0, 1))))
    assert_(eq(np.take(y, (2, 0, 1), 1), take(x, (2, 0, 1), 1)))
    assert_(eq(np.inner(filled(x, 0), filled(y, 0)), inner(x, y)))
    assert_(eq(np.outer(filled(x, 0), filled(y, 0)), outer(x, y)))
    y = array(['abc', 1, 'def', 2, 3], object)
    y[2] = masked
    t = take(y, [0, 3, 4])
    assert_(t[0] == 'abc')
    assert_(t[1] == 2)
    assert_(t[2] == 3)
def find_starters(self):
    """
    This function finds a pair of instances: one positive and one negative.

    :param clf: classifier being extracted
    :return: (x+, x-) a pair of instances
    """
    # predict = 1 ? inner(x, coef) + intercept_ > 0 : 0
    x_n, x_p = (None, None)
    x_n_found = False
    x_p_found = False

    for d in self.X_test:
        if x_n_found and x_p_found:
            break

        if self.query(d) == 1 and (not x_p_found):
            x_p = d
            x_p_found = True
        elif self.query(d) == self.NEG and (not x_n_found):
            x_n = d
            x_n_found = True

    return x_p, x_n
def run(self, peaks, weights=None):
    """Get smeared values.

    Args:
        peaks:
        weights:
            Weight factors for "peaks". This can be a one-dimensional or
            multi-dimensional array. The last dimension must have the same
            order as the "peaks".
    """
    smearing_function = self._smearing_function
    xs = self._xs
    sigma = self._sigma
    tmp = smearing_function(xs[:, None], peaks[None, :], sigma)
    if weights is not None:
        values = np.inner(tmp, weights)
    else:
        values = np.sum(tmp, axis=1)
    return values
def _create_rotational_weights_for_elements(self, kpoint, transformation_matrix, vectors):
    """
    Parameters
    ----------
    kpoint : 1d array
        Reciprocal space point in fractional coordinates for PC.
    vectors : (..., natoms_p * ndims, nbands) array
        Vectors for SC after translational projection.
    """
    projected_vectors = self._rotational_projector.project_vectors(
        vectors, kpoint, transformation_matrix)

    nirreps, natoms_p, nelms, tmp, nbands = projected_vectors.shape

    shape = (nirreps, natoms_p, nelms, natoms_p, nelms, nbands)
    weights = np.zeros(shape, dtype=complex)
    for i in range(nirreps):
        for j in range(nbands):
            weights[i, ..., j] = np.inner(
                np.conj(projected_vectors[i, ..., j]),
                projected_vectors[i, ..., j])

    return weights, projected_vectors
def _hard_monochrome(self, sample):
    """
    Return the monochrome colors corresponding to `sample`, if any.

    A boolean is also returned, specifying whether or not the saturation is
    sufficient for non monochrome colors.
    """
    gray_proj = np.inner(sample, Name._GRAY_UNIT) * Name._GRAY_UNIT
    gray_dist = norm(sample - gray_proj)
    if gray_dist > 15:
        return []

    colors = []
    luminance = np.sum(sample * Name._GRAY_COEFF)
    if luminance > 45 and luminance < 170:
        colors.append(self._settings['gray_name'])
    if luminance <= 50:
        colors.append(self._settings['black_name'])
    if luminance >= 170:
        colors.append(self._settings['white_name'])

    return colors

# Normalized identity (BGR gray) vector.
def _solve_hessian(G, Y, thY, precon, lambda_min):
    N, T = Y.shape
    # Compute the derivative of the score
    psidY = ne.evaluate('(- thY ** 2 + 1.) / 2.')  # noqa
    # Build the diagonal of the Hessian, a.
    Y_squared = Y ** 2
    if precon == 2:
        a = np.inner(psidY, Y_squared) / float(T)
    elif precon == 1:
        sigma2 = np.mean(Y_squared, axis=1)
        psidY_mean = np.mean(psidY, axis=1)
        a = psidY_mean[:, None] * sigma2[None, :]
        diagonal_term = np.mean(Y_squared * psidY) + 1.
        a[np.diag_indices_from(a)] = diagonal_term
    else:
        raise ValueError('precon should be 1 or 2')
    # Compute the eigenvalues of the Hessian
    eigenvalues = 0.5 * (a + a.T - np.sqrt((a - a.T) ** 2 + 4.))
    # Regularize
    problematic_locs = eigenvalues < lambda_min
    np.fill_diagonal(problematic_locs, False)
    i_pb, j_pb = np.where(problematic_locs)
    a[i_pb, j_pb] += lambda_min - eigenvalues[i_pb, j_pb]
    # Invert the transform
    return (G * a.T - G.T) / (a * a.T - 1.)
def test_picard():
    N, T = 2, 10000
    rng = np.random.RandomState(42)
    S = rng.laplace(size=(N, T))
    A = rng.randn(N, N)
    X = np.dot(A, S)
    for precon in [1, 2]:
        Y, W = picard(X, precon=precon, verbose=True)
        # Get the final gradient norm
        G = np.inner(np.tanh(Y / 2.), Y) / float(T) - np.eye(N)
        assert_allclose(G, np.zeros((N, N)), atol=1e-7)
        assert_equal(Y.shape, X.shape)
        assert_equal(W.shape, A.shape)
        WA = np.dot(W, A)
        WA = get_perm(WA)[1]  # Permute and scale
        assert_allclose(WA, np.eye(N), rtol=1e-2, atol=1e-2)
def test_picardo():
    N, T = 2, 10000
    rng = np.random.RandomState(42)
    S = rng.laplace(size=(N, T))
    A = rng.randn(N, N)
    X = np.dot(A, S)
    Y, W = picardo(X, verbose=2)
    # Get the final gradient norm
    G = np.inner(np.tanh(Y), Y) / float(T) - np.eye(N)
    G = (G - G.T)  # take skew-symmetric part
    assert_allclose(G, np.zeros((N, N)), atol=1e-7)
    assert_equal(Y.shape, X.shape)
    assert_equal(W.shape, A.shape)
    WA = np.dot(W, A)
    WA = get_perm(WA)[1]  # Permute and scale
    assert_allclose(WA, np.eye(N), rtol=1e-2, atol=1e-2)
def interpolate_learned_policy(old_policy, new_policy, interpolate, old_coeff,
                               new_coeff, weight, method):
    if method == "stack_vel_pos":
        learned_trajectory = np.zeros(human.shape)
        for item in inPlay:
            for index in np.arange(item[0], item[0] + tao):
                learned_trajectory[index] = human[index]
            for index in np.arange(item[0] + tao, item[1] + 1):
                feature = autoreg_game_context[index, :]
                for i in range(tao - 1):
                    feature = np.append(feature, learned_trajectory[index - (i + 1)]
                                        - learned_trajectory[index - (i + 2)])
                for i in range(tao):
                    feature = np.append(feature, learned_trajectory[index - (i + 1)])
                previous_prediction = learned_trajectory[index - tao:index].copy()
                previous_prediction = previous_prediction[::-1]
                old_model_predict = (old_policy.predict(feature)
                                     + np.inner(old_coeff, previous_prediction) * weight) / (1 + weight)
                new_model_predict = (new_policy.predict(feature)
                                     + np.inner(new_coeff, previous_prediction) * weight) / (1 + weight)
                # current_prediction = interpolate * new_policy.predict(feature) + (1-interpolate) * old_policy.predict(feature)
                learned_trajectory[index] = (interpolate * new_model_predict
                                             + (1 - interpolate) * old_model_predict)
    return learned_trajectory
def interpolate_test_policy(old_policy, new_policy, interpolate, reference_path,
                            context, old_coeff, new_coeff, weight, method):
    Y_predict = np.zeros(reference_path.shape)
    if method == "stack_vel_pos":
        for i in range(len(reference_path)):
            if i < tao:
                Y_predict[i] = reference_path[i]  # note: have the first tau frames correct
            else:
                feature = context[i]
                for j in range(tao - 1):
                    feature = np.hstack((feature, Y_predict[i - (j + 1)] - Y_predict[i - (j + 2)]))
                for j in range(tao):
                    feature = np.hstack((feature, Y_predict[i - (j + 1)]))
                previous_prediction = Y_predict[i - tao:i]
                previous_prediction = previous_prediction[::-1]
                # current_prediction = interpolate * new_policy.predict(feature) + (1-interpolate) * old_policy.predict(feature)
                old_model_predict = (old_policy.predict(feature)
                                     + np.inner(old_coeff, previous_prediction) * weight) / (1 + weight)
                new_model_predict = (new_policy.predict(feature)
                                     + np.inner(new_coeff, previous_prediction) * weight) / (1 + weight)
                # Y_predict[i] = (current_prediction + np.inner(coeff, previous_prediction)*weight)/(1+weight)  # replaced
                Y_predict[i] = interpolate * new_model_predict + (1 - interpolate) * old_model_predict
    return Y_predict