The following 50 code examples, extracted from open source Python projects, illustrate how to use numpy.eye().
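Before the project examples, here is a minimal refresher sketch of the numpy.eye() calls the snippets below rely on; the values in the comments are what NumPy actually produces.

import numpy as np

I = np.eye(3)             # 3x3 float64 identity matrix
R = np.eye(3, 4)          # 3x4 matrix with ones on the main diagonal
K = np.eye(4, k=1)        # ones on the first superdiagonal instead
Ii = np.eye(3, dtype=int) # integer identity, as used in several examples below
onehot = np.eye(5)[2]     # row 2 is the one-hot vector [0. 0. 1. 0. 0.]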
def initialize_estimate(self, estimate_id, initial_state):
    """Initialize a state estimate with identity covariance.

    The initial estimate is saved in the `self.estimates` dictionary.
    The timestamp in `self.estimate_times` is updated.

    Args:
        estimate_id (int): ID of the tracked target.
        initial_state (numpy.ndarray): Initial state of the estimate.
    """
    x = initial_state
    P = self.initial_position_covariance * np.eye(6)
    P[3:6, 3:6] = 0
    estimate = UWBTracker.StateEstimate(x, P)
    self.estimates[estimate_id] = estimate
    self.estimate_times[estimate_id] = rospy.get_time()
    self.ikf_prev_outlier_flags[estimate_id] = False
    self.ikf_outlier_counts[estimate_id] = 0
def _compute_process_and_covariance_matrices(self, dt):
    """Compute the transition matrix of the process model and the
    covariance matrices of the process and measurement models.

    Args:
        dt (float): Timestep of the discrete transition.

    Returns:
        F (numpy.ndarray): Transition matrix.
        Q (numpy.ndarray): Process covariance matrix.
        R (numpy.ndarray): Measurement covariance matrix.
    """
    F = np.array(np.bmat([[np.eye(3), dt * np.eye(3)],
                          [np.zeros((3, 3)), np.eye(3)]]))
    self.process_matrix = F

    q_p = self.process_covariance_position
    q_v = self.process_covariance_velocity
    Q = np.diag([q_p, q_p, q_p, q_v, q_v, q_v]) ** 2 * dt
    r = self.measurement_covariance
    R = r * np.eye(4)
    self.process_covariance = Q
    self.measurement_covariance = R

    return F, Q, R
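The block layout of F above is the standard constant-velocity model: position is advanced by velocity * dt while velocity stays constant. A minimal standalone sketch of the same block structure, using np.block (which builds the same array as np.bmat without going through the np.matrix type):

import numpy as np

dt = 0.1
F = np.block([[np.eye(3), dt * np.eye(3)],
              [np.zeros((3, 3)), np.eye(3)]])

x = np.array([0., 0., 0., 1., 2., 3.])  # position (0,0,0), velocity (1,2,3)
print(F @ x)                            # -> [0.1 0.2 0.3 1.  2.  3. ]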
def get_covariance(self):
    """Compute data covariance with the generative model.

    ``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
    where S**2 contains the explained variances.

    Returns
    -------
    cov : array, shape=(n_features, n_features)
        Estimated covariance of data.
    """
    components_ = self.components_
    exp_var = self.explained_variance_
    if self.whiten:
        components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
    exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
    cov = np.dot(components_.T * exp_var_diff, components_)
    cov.flat[::len(cov) + 1] += self.noise_variance_  # modify diag inplace
    return cov
def infExact_scipy_post(self, K, covars, y, sig2e, fixedEffects):
    n = y.shape[0]

    # mean vector
    m = covars.dot(fixedEffects)

    if K.shape[1] < K.shape[0]:
        K_true = K.dot(K.T)
    else:
        K_true = K

    if sig2e < 1e-6:
        # Cholesky factor of covariance with noise
        L = la.cholesky(K_true + sig2e * np.eye(n), overwrite_a=True, check_finite=False)
        sl = 1
        pL = -self.solveChol(L, np.eye(n))  # L = -inv(K+inv(sW^2))
    else:
        # Cholesky factor of B
        L = la.cholesky(K_true / sig2e + np.eye(n), overwrite_a=True, check_finite=False)
        sl = sig2e
        pL = L  # L = chol(eye(n)+sW*sW'.*K)

    alpha = self.solveChol(L, y - m, overwrite_b=False) / sl

    post = dict([])
    post['alpha'] = alpha                     # return the posterior parameters
    post['sW'] = np.ones(n) / np.sqrt(sig2e)  # sqrt of noise precision vector
    post['L'] = pL
    return post
def BorthogonalityTest(B, U):
    """
    Test the Frobenius norm of U^T B U - I_k
    """
    BU = np.zeros(U.shape)
    Bu = Vector()
    u = Vector()
    B.init_vector(Bu, 0)
    B.init_vector(u, 1)

    nvec = U.shape[1]
    for i in range(0, nvec):
        u.set_local(U[:, i])
        B.mult(u, Bu)
        BU[:, i] = Bu.get_local()

    UtBU = np.dot(U.T, BU)
    err = UtBU - np.eye(nvec, dtype=UtBU.dtype)
    print("|| UtBU - I ||_F = ", np.linalg.norm(err, 'fro'))
def KeyGen(**kwargs):
    '''
    Appendix B of BLISS paper
    m_bar = m + n

    Output:
        A: Public key, an n x m_bar numpy array
        S: Secret key, an m_bar x n numpy array
    '''
    q, n, m, alpha = kwargs['q'], kwargs['n'], kwargs['m'], kwargs['alpha']
    Aq_bar = util.crypt_secure_matrix(-(q - 1) / 2, (q - 1) / 2, n, m)
    S_bar = util.crypt_secure_matrix(-(2) ** alpha, (2) ** alpha, m, n)  # alpha is small enough, we need not reduce (mod q)
    S = np.vstack((S_bar, np.eye(n, dtype=int)))  # dimension is m_bar x n, elements are in Z mod(2q)
    A = np.hstack((2 * Aq_bar, q * np.eye(n, dtype=int) - 2 * np.matmul(Aq_bar, S_bar)))  # dimension is n x m_bar, elements are in Z mod(2q)
    #return util.matrix_to_Zq(A, 2*q), S, Aq_bar, S_bar
    return util.matrix_to_Zq(A, 2 * q), S
def test():
    # Classical SIS parameters
    n, m, alpha, q = 128, 872, 1, 114356107
    kappa = 20

    # Discrete Gaussian parameters
    sd = 300
    eta = 1.2

    A, S = KeyGen(q=q, n=n, m=m, alpha=alpha)
    #print(np.array(np.matmul(A, S) - q*np.eye(n), dtype=float)/(2*q))  # to test AS = q mod(2q)
    z, c = Sign(msg="Hello Bob", A=A, S=S, m=m, n=n, sd=sd, q=q, M=3.0, kappa=kappa)
    print(z)
    print(c)
    print(Verify(msg="Hello Bob", A=A, m=m, n=n, sd=sd, q=q, eta=eta, z=z, c=c, kappa=kappa))
    print(Verify(msg="Hello Robert", A=A, m=m, n=n, sd=sd, q=q, eta=eta, z=z, c=c, kappa=kappa))
    print(Verify(msg="Hello Roberto", A=A, m=m, n=n, sd=sd, q=q, eta=eta, z=z, c=c, kappa=kappa))
    print(Verify(msg="Hola Roberto", A=A, m=m, n=n, sd=sd, q=q, eta=eta, z=z, c=c, kappa=kappa))
def __init__(self, X, Y, R=None, t=None, s=None, sigma2=None, maxIterations=100, tolerance=0.001, w=0):
    if X.shape[1] != Y.shape[1]:
        raise ValueError('Both point clouds must have the same number of dimensions!')

    self.X = X
    self.Y = Y
    self.TY = Y
    (self.N, self.D) = self.X.shape
    (self.M, _) = self.Y.shape
    self.R = np.eye(self.D) if R is None else R
    self.t = np.atleast_2d(np.zeros((1, self.D))) if t is None else t
    self.s = 1 if s is None else s
    self.sigma2 = sigma2
    self.iteration = 0
    self.maxIterations = maxIterations
    self.tolerance = tolerance
    self.w = w
    self.q = 0
    self.err = 0
def __init__(self, X, Y, B=None, t=None, sigma2=None, maxIterations=100, tolerance=0.001, w=0):
    if X.shape[1] != Y.shape[1]:
        raise ValueError('Both point clouds must have the same number of dimensions!')

    self.X = X
    self.Y = Y
    self.TY = Y
    (self.N, self.D) = self.X.shape
    (self.M, _) = self.Y.shape
    self.B = np.eye(self.D) if B is None else B
    self.t = np.atleast_2d(np.zeros((1, self.D))) if t is None else t
    self.sigma2 = sigma2
    self.iteration = 0
    self.maxIterations = maxIterations
    self.tolerance = tolerance
    self.w = w
    self.q = 0
    self.err = 0
def get_loss(pred, label, end_points, reg_weight=0.001):
    """ pred: B*NUM_CLASSES,
        label: B, """
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=label)
    classify_loss = tf.reduce_mean(loss)
    tf.summary.scalar('classify loss', classify_loss)

    # Enforce the transformation as orthogonal matrix
    transform = end_points['transform']  # BxKxK
    K = transform.get_shape()[1].value
    mat_diff = tf.matmul(transform, tf.transpose(transform, perm=[0, 2, 1]))
    mat_diff -= tf.constant(np.eye(K), dtype=tf.float32)
    mat_diff_loss = tf.nn.l2_loss(mat_diff)
    tf.summary.scalar('mat loss', mat_diff_loss)

    return classify_loss + mat_diff_loss * reg_weight
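The regularizer above penalizes how far the predicted KxK transform is from an orthogonal matrix (T T^T should equal I). A minimal NumPy sketch of the same quantity for a single, non-batched matrix, matching tf.nn.l2_loss's factor of 1/2:

import numpy as np

def orthogonality_penalty(T):
    # ||T T^T - I||_F^2 / 2
    K = T.shape[0]
    diff = T @ T.T - np.eye(K)
    return 0.5 * np.sum(diff ** 2)

Q, _ = np.linalg.qr(np.random.randn(3, 3))
print(orthogonality_penalty(Q))                      # ~0 for an orthogonal matrix
print(orthogonality_penalty(np.random.randn(3, 3)))  # noticeably larger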
def get_loss(pred, label, end_points, reg_weight=0.001):
    """ pred: BxNxC,
        label: BxN, """
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=label)
    classify_loss = tf.reduce_mean(loss)
    tf.scalar_summary('classify loss', classify_loss)

    # Enforce the transformation as orthogonal matrix
    transform = end_points['transform']  # BxKxK
    K = transform.get_shape()[1].value
    mat_diff = tf.matmul(transform, tf.transpose(transform, perm=[0, 2, 1]))
    mat_diff -= tf.constant(np.eye(K), dtype=tf.float32)
    mat_diff_loss = tf.nn.l2_loss(mat_diff)
    tf.scalar_summary('mat_loss', mat_diff_loss)

    return classify_loss + mat_diff_loss * reg_weight
def get_loss(l_pred, seg_pred, label, seg, weight, end_points):
    per_instance_label_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=l_pred, labels=label)
    label_loss = tf.reduce_mean(per_instance_label_loss)

    # size of seg_pred is batch_size x point_num x part_cat_num
    # size of seg is batch_size x point_num
    per_instance_seg_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=seg_pred, labels=seg), axis=1)
    seg_loss = tf.reduce_mean(per_instance_seg_loss)

    per_instance_seg_pred_res = tf.argmax(seg_pred, 2)

    # Enforce the transformation as orthogonal matrix
    transform = end_points['transform']  # BxKxK
    K = transform.get_shape()[1].value
    mat_diff = tf.matmul(transform, tf.transpose(transform, perm=[0, 2, 1])) - tf.constant(np.eye(K), dtype=tf.float32)
    mat_diff_loss = tf.nn.l2_loss(mat_diff)

    total_loss = weight * seg_loss + (1 - weight) * label_loss + mat_diff_loss * 1e-3

    return total_loss, label_loss, per_instance_label_loss, seg_loss, per_instance_seg_loss, per_instance_seg_pred_res
def refit_model(self):
    """Learns a new surrogate model using the data observed so far.
    """
    # only fit the model if there is data for it.
    if len(self.known_models) > 0:
        self._build_feature_maps(self.known_models, self.ngram_maxlen, self.thres)

        X = sp.vstack([self._compute_features(mdl) for mdl in self.known_models], "csr")
        y = np.array(self.known_scores, dtype='float64')

        #A = np.dot(X.T, X) + lamb * np.eye(X.shape[1])
        #b = np.dot(X.T, y)
        self.surr_model = lm.Ridge(self.lamb_ridge)
        self.surr_model.fit(X, y)
        # NOTE: if the search space has holes, it breaks. Needs try/except module.
def covariance_matrix(self):
    if self._debug:
        # return None
        ka = self.k_active
        if ka > 0:
            C = np.eye(self.N) + np.dot(self.V[:ka].T * self.S[:ka], self.V[:ka])
            C = (C * self.D).T * self.D
        else:
            C = np.diag(self.D ** 2)
        C *= self.sigma ** 2
    else:
        # Fake covariance matrix for speed
        C = np.ones(1)
        self.B = np.ones(1)
    return C
def Minibatch_Discriminator(input, num_kernels=100, dim_per_kernel=5, init=False, name='MD'):
    num_inputs = df_dim * 4
    theta = tf.get_variable(name + "/theta",
                            [num_inputs, num_kernels, dim_per_kernel],
                            initializer=tf.random_normal_initializer(stddev=0.05))
    log_weight_scale = tf.get_variable(name + "/lws",
                                       [num_kernels, dim_per_kernel],
                                       initializer=tf.constant_initializer(0.0))
    W = tf.mul(theta, tf.expand_dims(tf.exp(log_weight_scale) / tf.sqrt(tf.reduce_sum(tf.square(theta), 0)), 0))
    W = tf.reshape(W, [-1, num_kernels * dim_per_kernel])
    x = input
    x = tf.reshape(x, [batchsize, num_inputs])
    activation = tf.matmul(x, W)
    activation = tf.reshape(activation, [-1, num_kernels, dim_per_kernel])
    abs_dif = tf.mul(tf.reduce_sum(tf.abs(tf.sub(tf.expand_dims(activation, 3),
                                                 tf.expand_dims(tf.transpose(activation, [1, 2, 0]), 0))), 2),
                     1 - tf.expand_dims(tf.constant(np.eye(batchsize), dtype=np.float32), 1))
    f = tf.reduce_sum(tf.exp(-abs_dif), 2) / tf.reduce_sum(tf.exp(-abs_dif))
    print(f.get_shape())
    print(input.get_shape())
    return tf.concat(1, [x, f])
def _build_policy(env, predictor, epsilon):
    eye = np.eye(env.num_states)
    q_values = predictor.predict(
        {str(i): eye[i] for i in range(env.num_states)}
    )
    policy_vector = [
        env.ACTIONS[np.argmax([q_values[action][i] for action in env.ACTIONS])]
        for i in range(env.num_states)
    ]

    def policy(state) -> str:
        if np.random.random() < epsilon:
            return np.random.choice(env.ACTIONS)
        else:
            return policy_vector[state]

    return policy
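Indexing rows of np.eye(n), as _build_policy does here (and as __prepare_controls_and_actions does further below), is a common idiom for one-hot encoding discrete states. A minimal sketch:

import numpy as np

num_states = 4
states = np.array([0, 2, 3])

# row i of the identity matrix is the one-hot encoding of state i
onehots = np.eye(num_states)[states]
print(onehots)
# [[1. 0. 0. 0.]
#  [0. 0. 1. 0.]
#  [0. 0. 0. 1.]]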
def export_data(prediction_dir, nii_image_dir, tfrecords_dir, export_dir, transformation=None):
    for file_path in os.listdir(prediction_dir):
        name, prediction, probability = read_prediction_file(os.path.join(prediction_dir, file_path))

        if transformation:
            image, ground_truth = get_original_image(os.path.join(tfrecords_dir, name + '.tfrecord'), False)
            prediction = transformation.transform_image(prediction, probability, image)

        # build cv_predictions .nii image
        img = nib.Nifti1Image(prediction, np.eye(4))
        img.set_data_dtype(dtype=np.uint8)

        path = os.path.join(nii_image_dir, name)
        adc_name = next(l for l in os.listdir(path) if 'MR_ADC' in l)
        export_image = nib.load(os.path.join(nii_image_dir, name, adc_name, adc_name + '.nii'))

        i = export_image.get_data()
        i[:] = img.get_data()

        # set name to specification and export
        _id = next(l for l in os.listdir(path) if 'MR_MTT' in l).split('.')[-1]
        export_path = os.path.join(export_dir, 'SMIR.' + name + '.' + _id + '.nii')
        nib.save(export_image, os.path.join(export_path))
def eye(sites, ldim):
    """Returns an MPA representing the identity matrix

    :param sites: Number of sites
    :param ldim: Int-like local dimension or iterable of local dimensions
    :returns: Representation of the identity matrix as MPA

    >>> I = eye(4, 2)
    >>> I.ranks, I.shape
    ((1, 1, 1), ((2, 2), (2, 2), (2, 2), (2, 2)))

    >>> I = eye(3, (3, 4, 5))
    >>> I.shape
    ((3, 3), (4, 4), (5, 5))
    """
    if isinstance(ldim, collections.abc.Iterable):
        ldim = tuple(ldim)
        assert len(ldim) == sites
    else:
        ldim = it.repeat(ldim, sites)
    return mp.MPArray.from_kron(map(np.eye, ldim))
def _embed_ltens_identity(mpa, embed_tensor=None):
    """Embed with identity matrices by default.

    :param embed_tensor: If the MPAs do not have two physical legs or have
        non-square physical dimensions, you must provide an embedding
        tensor. The default is to use the square identity matrix, assuming
        that the size of the two physical legs is the same at each site.

    :returns: `embed_tensor` with one size-one virtual leg added at each
        end.
    """
    if embed_tensor is None:
        pdims = mpa.shape[0]
        assert len(pdims) == 2 and pdims[0] == pdims[1], (
            "For ndims != 2 or non-square shape, you must supply a tensor "
            "for embedding")
        embed_tensor = np.eye(pdims[0])
    embed_ltens = embed_tensor[None, ..., None]
    return embed_ltens
def test_povm_normalization_ic(dim):
    for name, constructor in ALL_POVMS.items():
        # Check that the POVM is normalized: elements must sum to the identity
        current_povm = constructor(dim)
        element_sum = sum(iter(current_povm))
        assert_array_almost_equal(element_sum, np.eye(dim))

        # Check that the attribute that says whether the POVM is IC is correct.
        linear_inversion_recons = np.dot(current_povm.linear_inversion_map,
                                         current_povm.probability_map)
        if current_povm.informationally_complete:
            assert_array_almost_equal(
                linear_inversion_recons, np.eye(dim**2),
                err_msg='POVM {} is not informationally complete'.format(name))
        else:
            assert np.abs(linear_inversion_recons - np.eye(dim**2)).max() > 0.1, \
                'POVM {} is informationally complete'.format(name)
def test_povm_ic_mpa(nr_sites, local_dim, rank, rgen):
    # Check that the tensor product of the PauliGen POVM is IC.
    paulis = povm.pauli_povm(local_dim)
    inv_map = mp_from_array_repeat(paulis.linear_inversion_map, nr_sites)
    probab_map = mp_from_array_repeat(paulis.probability_map, nr_sites)
    reconstruction_map = mp.dot(inv_map, probab_map)

    eye = factory.eye(nr_sites, local_dim**2)
    assert mp.norm(reconstruction_map - eye) < 1e-5

    # Check linear inversion for a particular example MPA.
    # Linear inversion works for arbitrary matrices, not only for states,
    # so we test it for an arbitrary MPA.
    # Normalize, otherwise the absolute error check below will not work.
    mpa = factory.random_mpa(nr_sites, local_dim**2, rank, dtype=np.complex_,
                             randstate=rgen, normalized=True)
    probabs = mp.dot(probab_map, mpa)
    recons = mp.dot(inv_map, probabs)
    assert mp.norm(recons - mpa) < 1e-6
def test_randomized_svd(rows, cols, rank, dtype, transpose, n_iter, target_gen, rgen):
    rank = min(rows, cols) - 2 if rank == 'fullrank' else rank
    A = target_gen(rows, cols, rank=rank, randstate=rgen, dtype=dtype)
    U_ref, s_ref, V_ref = utils.truncated_svd(A, k=rank)
    U, s, V = em.randomized_svd(A, rank, transpose=transpose, randstate=rgen,
                                n_iter=n_iter)

    error_U = np.abs(U.conj().T.dot(U_ref)) - np.eye(rank)
    assert_allclose(np.linalg.norm(error_U), 0, atol=1e-3)
    error_V = np.abs(V.dot(V_ref.conj().T)) - np.eye(rank)
    assert_allclose(np.linalg.norm(error_V), 0, atol=1e-3)
    assert_allclose(s.ravel() - s_ref, 0, atol=1e-3)

    # Check that singular values are returned in descending order
    assert_array_equal(s, np.sort(s)[::-1])
def hessian(self, x, d=None):
    """
    Computes Hessian matrix
    """
    d = calc_distances(x) if d is None else d
    if d.ndim == 1:
        d = squareform(d)
    H = np.zeros((3 * len(x), 3 * len(x)))
    n = self.n

    for i in range(len(x)):
        for j in range(len(x)):
            if j == i:
                continue
            dx = x[i] - x[j]
            r = d[i, j]
            h = n / r ** (0.5 * n + 2) * ((n + 2) * np.multiply.outer(dx, dx) - np.eye(3) * r)
            H[3*i:3*(i+1), 3*j:3*(j+1)] = -h
            H[3*i:3*(i+1), 3*i:3*(i+1)] += h

    return H
def apply_gate(self, gate, on_qubit_name):
    on_qubit = self.qubits.get_quantum_register_containing(on_qubit_name)
    if len(on_qubit.get_noop()) > 0:
        print("NOTE this qubit has been measured previously, there should be no more gates allowed but we are reverting that measurement for consistency with IBM's language")
        on_qubit.set_state(on_qubit.get_noop())
        on_qubit.set_noop([])
    if not on_qubit.is_entangled():
        if on_qubit.get_num_qubits() != 1:
            raise Exception("This qubit is not marked as entangled but it has an entangled state")
        on_qubit.set_state(gate * on_qubit.get_state())
    else:
        if not on_qubit.get_num_qubits() > 1:
            raise Exception("This qubit is marked as entangled but it does not have an entangled state")
        n_entangled = len(on_qubit.get_entangled())
        apply_gate_to_qubit_idx = [qb.name for qb in on_qubit.get_entangled()].index(on_qubit_name)
        if apply_gate_to_qubit_idx == 0:
            entangled_gate = gate
        else:
            entangled_gate = Gate.eye
        for i in range(1, n_entangled):
            if apply_gate_to_qubit_idx == i:
                entangled_gate = np.kron(entangled_gate, gate)
            else:
                entangled_gate = np.kron(entangled_gate, Gate.eye)
        on_qubit.set_state(entangled_gate * on_qubit.get_state())
def _matrix_inverse(self, matrix):
    """
    Computes inverse of a matrix.
    """
    matrix = np.array(matrix)
    n_features = matrix.shape[0]
    rank = np.linalg.matrix_rank(matrix)

    if rank == n_features:
        return np.linalg.inv(matrix)
    else:
        # Matrix is not full rank, so use Hadi's technique to compute inverse
        # Reference: Ali S. Hadi (1992) "Identifying Multiple Outliers in Multivariate Data" eq. 2.3, 2.4
        eigenValues, eigenVectors = np.linalg.eig(matrix)
        eigenValues = np.abs(eigenValues)  # to deal with -0 values
        idx = eigenValues.argsort()[::-1]
        eigenValues = eigenValues[idx]
        eigenVectors = eigenVectors[:, idx]

        s = eigenValues[eigenValues != 0].min()
        w = [1 / max(e, s) for e in eigenValues]
        W = w * np.eye(n_features)

        return eigenVectors.dot(W).dot(eigenVectors.T)
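The rank-deficient branch clamps each eigenvalue at the smallest nonzero one before inverting, so a singular matrix still yields a usable pseudo-inverse. A minimal standalone sketch of that clamping on a 2x2 singular matrix (values in the comment are what this produces):

import numpy as np

A = np.array([[2., 0.], [0., 0.]])  # rank-deficient: np.linalg.inv would fail
vals, vecs = np.linalg.eig(A)
vals = np.abs(vals)
s = vals[vals != 0].min()           # smallest nonzero eigenvalue (here 2.0)
W = np.array([1 / max(v, s) for v in vals]) * np.eye(2)
A_inv = vecs.dot(W).dot(vecs.T)
print(A_inv)                        # clamped inverse: [[0.5, 0.], [0., 0.5]]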
def swap_affine(axes):
    """ Build a correction matrix, from the given orientation of axes to RAS.

    Parameters
    ----------
    axes: str (mandatory)
        the given orientation of the axes.

    Returns
    -------
    rotation: array (4, 4)
        the correction matrix.
    """
    rotation = numpy.eye(4)
    rotation[:3, 0] = CORRECTION_MATRIX_COLUMNS[axes[0]]
    rotation[:3, 1] = CORRECTION_MATRIX_COLUMNS[axes[1]]
    rotation[:3, 2] = CORRECTION_MATRIX_COLUMNS[axes[2]]
    return rotation
def distribution_parameters(self, parameter_name):
    if parameter_name == 'trend':
        E = dot(dot(self.derivative_matrix.T, inv(diag(self.parameters.list['omega'].current_value))), self.derivative_matrix)
        mean = dot(inv(eye(self.size) + E), self.data)
        cov = (self.parameters.list['sigma2'].current_value) * inv(eye(self.size) + E)
        return {'mean': mean, 'cov': cov}
    elif parameter_name == 'sigma2':
        E = dot(dot(self.derivative_matrix.T, inv(diag(self.parameters.list['omega'].current_value))), self.derivative_matrix)
        pos = self.size
        loc = 0
        scale = 0.5 * dot((self.data - dot(eye(self.size), self.parameters.list['trend'].current_value)).T,
                          (self.data - dot(eye(self.size), self.parameters.list['trend'].current_value))) \
            + 0.5 * dot(dot(self.parameters.list['trend'].current_value.T, E), self.parameters.list['trend'].current_value)
    elif parameter_name == 'lambda2':
        pos = self.size - self.total_variation_order - 1 + self.alpha
        loc = 0.5 * (norm(dot(self.derivative_matrix, self.parameters.list['trend'].current_value), ord=1)) / self.parameters.list['sigma2'].current_value + self.rho
        scale = 1
    elif parameter_name == 'omega':
        pos = [sqrt(((self.parameters.list['lambda2'].current_value ** 2) * self.parameters.list['sigma2'].current_value) / (dj ** 2))
               for dj in dot(self.derivative_matrix, self.parameters.list['trend'].current_value)]
        loc = 0
        scale = self.parameters.list['lambda2'].current_value ** 2
    return {'pos': pos, 'loc': loc, 'scale': scale}
def estimate_time_constant(y, p=2, sn=None, lags=5, fudge_factor=1.):
    """
    Estimate AR model parameters through the autocovariance function

    Parameters
    ----------
    y : array, shape (T,)
        One dimensional array containing the fluorescence intensities with
        one entry per time-bin.
    p : positive integer
        order of AR system
    sn : float
        sn standard deviation, estimated if not provided.
    lags : positive integer
        number of additional lags where the autocovariance is computed
    fudge_factor : float (0 < fudge_factor <= 1)
        shrinkage factor to reduce bias

    Returns
    -------
    g : estimated coefficients of the AR process
    """
    if sn is None:
        sn = GetSn(y)

    lags += p
    xc = axcov(y, lags)
    xc = xc[:, np.newaxis]

    A = scipy.linalg.toeplitz(xc[lags + np.arange(lags)],
                              xc[lags + np.arange(p)]) - sn**2 * np.eye(lags, p)
    g = np.linalg.lstsq(A, xc[lags + 1:])[0]
    gr = np.roots(np.concatenate([np.array([1]), -g.flatten()]))
    gr = (gr + gr.conjugate()) / 2.
    gr[gr > 1] = 0.95 + np.random.normal(0, 0.01, np.sum(gr > 1))
    gr[gr < 0] = 0.15 + np.random.normal(0, 0.01, np.sum(gr < 0))
    g = np.poly(fudge_factor * gr)
    g = -g[1:]

    return g.flatten()
def gaussian_function(y, dimension, mu, cov, log=False, standard=False):
    """Multivariate Gaussian density of observation y (row vector) with mean
    mu (row vector) and covariance matrix cov. If `log` is True the
    log-density is returned; if `standard` is True the input is whitened
    first and the standard-normal covariance is used."""
    x = y - mu
    if standard:
        x = np.dot(x, np.linalg.inv(cov) ** 0.5)
        cov_ = np.eye(dimension)
    else:
        cov_ = cov
    np.seterr(all='ignore')  # suppress numerical warnings
    if log:
        func = - (dimension / 2) * np.log(2 * math.pi) - 0.5 * np.log(np.linalg.det(cov_))
        exp = -0.5 * np.dot(np.dot(x, np.linalg.inv(cov_)), x.T)
        return func + exp
    else:
        sigma = (2 * math.pi) ** (dimension / 2) * np.linalg.det(cov_) ** 0.5
        func = 1. / sigma
        exp = np.exp(-0.5 * np.dot(np.dot(x, np.linalg.inv(cov_)), x.T))
        return func * exp
def batch_works(k):
    if k == n_processes - 1:
        paths = all_paths[k * int(len(all_paths) / n_processes):]
    else:
        paths = all_paths[k * int(len(all_paths) / n_processes): (k + 1) * int(len(all_paths) / n_processes)]

    for path in paths:
        probs = np.load(os.path.join(input_path, path))
        pred = np.argmax(probs, axis=3)
        fg_prob = 1 - probs[..., 0]
        pred = clean_contour(fg_prob, pred)
        seg = np.zeros(pred.shape, dtype=np.int16)
        seg[pred == 1] = 1
        seg[pred == 2] = 2
        seg[pred == 3] = 4
        img = nib.Nifti1Image(seg, np.eye(4))
        nib.save(img, os.path.join(output_path, path.replace('_probs.npy', '.nii.gz')))
def __prepare_controls_and_actions(self):
    self.__discrete_controls_to_net = np.array([i for i in range(len(self.__discrete_controls))
                                                if i not in self.__discrete_controls_manual])
    self.__num_manual_controls = len(self.__discrete_controls_manual)

    self.__net_discrete_actions = []
    if not self.__opposite_button_pairs:
        for perm in it.product([False, True], repeat=len(self.__discrete_controls_to_net)):
            self.__net_discrete_actions.append(list(perm))
    else:
        for perm in it.product([False, True], repeat=len(self.__discrete_controls_to_net)):
            act = list(perm)
            valid = True
            for button_p in self.__opposite_button_pairs:
                if act[button_p[0]] and act[button_p[1]]:
                    valid = False
            if valid:
                self.__net_discrete_actions.append(act)

    self.__num_net_discrete_actions = len(self.__net_discrete_actions)
    self.__action_to_index = {tuple(val): ind for (ind, val) in enumerate(self.__net_discrete_actions)}
    self.__net_discrete_actions = np.array(self.__net_discrete_actions)
    self.__onehot_discrete_acitons = np.eye(self.__num_net_discrete_actions)
def initialize_match_matrix(self):
    """
    Construct the initial match matrix.

    Returns:
    --------
    match_matrix: array
        The match matrix
    """
    # TODO add possibility for slack
    match_matrix = np.zeros((self.reactants_elements.size, self.products_elements.size))

    # set sub blocks of the match matrix to one plus a random perturbation,
    # followed by column normalization
    for indices in self.element_type_subset_indices:
        match_matrix[indices] = 1 + 1e-3 * np.random.random(match_matrix[indices].shape)
        match_matrix[indices] /= match_matrix[indices].sum(0)

    #match_matrix = np.eye(match_matrix.shape[0])
    #for i,j in [(0,0),(0,4),(1,1),(1,3),(4,0),(4,4),(3,3),(3,1),(7,7),(7,11),(8,8),(8,10),(20,20),(20,24),(21,21),(21,23),(11,7),(11,11),(10,8),(10,10),(24,20),(24,24),(23,23),(23,21)]:
    #    match_matrix[i,j] = 0.5

    return match_matrix
def test_nonzero_twodim(self):
    x = np.array([[0, 1, 0], [2, 0, 3]])
    assert_equal(np.count_nonzero(x), 3)
    assert_equal(np.nonzero(x), ([0, 1, 1], [1, 0, 2]))

    x = np.eye(3)
    assert_equal(np.count_nonzero(x), 3)
    assert_equal(np.nonzero(x), ([0, 1, 2], [0, 1, 2]))

    x = np.array([[(0, 1), (0, 0), (1, 11)],
                  [(1, 1), (1, 0), (0, 0)],
                  [(0, 0), (1, 5), (0, 1)]], dtype=[('a', 'f4'), ('b', 'u1')])
    assert_equal(np.count_nonzero(x['a']), 4)
    assert_equal(np.count_nonzero(x['b']), 5)
    assert_equal(np.nonzero(x['a']), ([0, 1, 1, 2], [2, 0, 1, 1]))
    assert_equal(np.nonzero(x['b']), ([0, 0, 1, 2, 2], [0, 2, 0, 1, 2]))

    assert_(not x['a'].T.flags.aligned)
    assert_equal(np.count_nonzero(x['a'].T), 4)
    assert_equal(np.count_nonzero(x['b'].T), 5)
    assert_equal(np.nonzero(x['a'].T), ([0, 1, 1, 2], [1, 1, 2, 0]))
    assert_equal(np.nonzero(x['b'].T), ([0, 0, 1, 2, 2], [0, 1, 2, 0, 2]))
def test_matrix_rank(self):
    # Full rank matrix
    yield assert_equal, 4, matrix_rank(np.eye(4))
    # rank deficient matrix
    I = np.eye(4)
    I[-1, -1] = 0.
    yield assert_equal, matrix_rank(I), 3
    # All zeros - zero rank
    yield assert_equal, matrix_rank(np.zeros((4, 4))), 0
    # 1 dimension - rank 1 unless all 0
    yield assert_equal, matrix_rank([1, 0, 0, 0]), 1
    yield assert_equal, matrix_rank(np.zeros((4,))), 0
    # accepts array-like
    yield assert_equal, matrix_rank([1]), 1
    # greater than 2 dimensions raises error
    yield assert_raises, TypeError, matrix_rank, np.zeros((2, 2, 2))
    # works on scalar
    yield assert_equal, matrix_rank(1), 1
def test_byteorder_check():
    # Byte order check should pass for native order
    if sys.byteorder == 'little':
        native = '<'
    else:
        native = '>'

    for dtt in (np.float32, np.float64):
        arr = np.eye(4, dtype=dtt)
        n_arr = arr.newbyteorder(native)
        sw_arr = arr.newbyteorder('S').byteswap()
        assert_equal(arr.dtype.byteorder, '=')
        for routine in (linalg.inv, linalg.det, linalg.pinv):
            # Normal call
            res = routine(arr)
            # Native but not '='
            assert_array_equal(res, routine(n_arr))
            # Swapped
            assert_array_equal(res, routine(sw_arr))
def test_100(self):
    x, w = leg.leggauss(100)

    # test orthogonality. Note that the results need to be normalized,
    # otherwise the huge values that can arise from fast growing
    # functions like Laguerre can be very confusing.
    v = leg.legvander(x, 99)
    vv = np.dot(v.T * w, v)
    vd = 1 / np.sqrt(vv.diagonal())
    vv = vd[:, None] * vv * vd
    assert_almost_equal(vv, np.eye(100))

    # check that the integral of 1 is correct
    tgt = 2.0
    assert_almost_equal(w.sum(), tgt)
def _fwlinear(self, args, output_size, scope=None):
    if args is None or (nest.is_sequence(args) and not args):
        raise ValueError("`args` must be specified")
    if not nest.is_sequence(args):
        args = [args]
    assert len(args) == 2
    assert args[0].get_shape().as_list()[1] == output_size

    dtype = [a.dtype for a in args][0]

    with vs.variable_scope(scope or "Linear"):
        matrixW = vs.get_variable(
            "MatrixW", dtype=dtype,
            initializer=tf.convert_to_tensor(np.eye(output_size, dtype=np.float32) * .05))
        matrixC = vs.get_variable(
            "MatrixC", [args[1].get_shape().as_list()[1], output_size], dtype=dtype)
        res = tf.matmul(args[0], matrixW) + tf.matmul(args[1], matrixC)
        return res
def getValuesFromPose(self, P):
    '''return the virtual values of the pots corresponding to the pose P'''
    vals = []
    grads = []
    for i, r, l, placement, attach_p in zip(range(3), self.rs, self.ls, self.placements, self.attach_ps):
        # first pot axis
        a = placement.rot * col([1, 0, 0])
        # second pot axis
        b = placement.rot * col([0, 1, 0])
        # string axis
        c = placement.rot * col([0, 0, 1])

        # attach point on the joystick
        p_joystick = P * attach_p
        v = p_joystick - placement.trans
        va = v - dot(v, a) * a
        vb = v - dot(v, b) * b

        # angles of the pots
        alpha = math.atan2(dot(vb, a), dot(vb, c))
        beta = math.atan2(dot(va, b), dot(va, c))
        vals.append(alpha)
        vals.append(beta)

        # calculation of the derivatives
        dv = np.bmat([-P.rot.mat() * quat.skew(attach_p), P.rot.mat()])
        dva = (np.eye(3) - a * a.T) * dv
        dvb = (np.eye(3) - b * b.T) * dv
        dalpha = (1 / dot(vb, vb)) * (dot(vb, c) * a.T - dot(vb, a) * c.T) * dvb
        dbeta = (1 / dot(va, va)) * (dot(va, c) * b.T - dot(va, b) * c.T) * dva
        grads.append(dalpha)
        grads.append(dbeta)

    return (col(vals), np.bmat([[grads]]))
def prepare_cholesky(N=100, dtype=np.double):
    N = int(N * 2)
    A = np.asarray(np.random.rand(N, N), dtype=dtype)
    return (A * A.transpose() + N * np.eye(N),)

#return toc/trials, N*N*N/3.0*1e-9, times
#inv: return toc/trials, 2*N*N*N*1e-9, times
#####################################################################################
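Note that A * A.transpose() is an elementwise product here (A is an ndarray, not np.matrix), which is symmetric but not necessarily positive definite on its own; adding N * np.eye(N) makes the matrix strictly diagonally dominant (every off-diagonal entry is below 1), hence safe to factor. A small sketch checking that property, built the same way:

import numpy as np

N = 200
A = np.random.rand(N, N)
M = A * A.T + N * np.eye(N)    # symmetric, strictly diagonally dominant

np.linalg.cholesky(M)          # succeeds: M is symmetric positive definite
print(np.allclose(M, M.T))     # True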
def update_filter(self, timestep, estimate, ranges):
    """Update position filter.

    Args:
        timestep (float): Time elapsed since last update.
        estimate (StateEstimate): Position estimate to update.
        ranges (list of floats): Range measurements.

    Returns:
        new_estimate (StateEstimate): Updated position estimate.
        outlier_flag (bool): Flag indicating whether the measurement
            was rejected as an outlier.
    """
    num_of_units = len(ranges)
    x = estimate.state
    P = estimate.covariance

    # Compute process matrix and covariance matrices
    F, Q, R = self._compute_process_and_covariance_matrices(timestep)
    # rospy.logdebug('F: {}'.format(F))
    # rospy.logdebug('Q: {}'.format(Q))
    # rospy.logdebug('R: {}'.format(R))

    # Prediction
    x = np.dot(F, x)
    P = np.dot(F, np.dot(P, F.T)) + Q

    # Update
    n = np.copy(x)
    H = np.zeros((num_of_units, x.size))
    z = np.zeros((num_of_units, 1))
    h = np.zeros((num_of_units, 1))

    for i in range(self.ikf_iterations):
        n, K, outlier_flag = self._ikf_iteration(x, n, ranges, h, H, z, estimate, R)

    if outlier_flag:
        new_estimate = estimate
    else:
        new_state = n
        new_covariance = np.dot((np.eye(6) - np.dot(K, H)), P)
        new_estimate = UWBTracker.StateEstimate(new_state, new_covariance)

    return new_estimate, outlier_flag
def get_precision(self):
    """Compute data precision matrix with the generative model.

    Equals the inverse of the covariance but computed with
    the matrix inversion lemma for efficiency.

    Returns
    -------
    precision : array, shape=(n_features, n_features)
        Estimated precision of data.
    """
    n_features = self.components_.shape[1]

    # handle corner cases first
    if self.n_components_ == 0:
        return np.eye(n_features) / self.noise_variance_
    if self.n_components_ == n_features:
        return linalg.inv(self.get_covariance())

    # Get precision using matrix inversion lemma
    components_ = self.components_
    exp_var = self.explained_variance_
    exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
    precision = np.dot(components_, components_.T) / self.noise_variance_
    precision.flat[::len(precision) + 1] += 1. / exp_var_diff
    precision = np.dot(components_.T,
                       np.dot(linalg.inv(precision), components_))
    precision /= -(self.noise_variance_ ** 2)
    precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
    return precision
def getTrainKernel(self, params):
    self.checkParams(params)
    return np.eye(self.n)
def AorthogonalityCheck(A, U, d):
    """
    Test the Frobenius norm of D^{-1}(U^T A U) - I_k
    """
    V = np.zeros(U.shape)
    AV = np.zeros(U.shape)
    Av = Vector()
    v = Vector()
    A.init_vector(Av, 0)
    A.init_vector(v, 1)

    nvec = U.shape[1]
    for i in range(0, nvec):
        v.set_local(U[:, i])
        v *= 1. / math.sqrt(d[i])
        A.mult(v, Av)
        AV[:, i] = Av.get_local()
        V[:, i] = v.get_local()

    VtAV = np.dot(V.T, AV)
    err = VtAV - np.eye(nvec, dtype=VtAV.dtype)

    # plt.imshow(np.abs(err))
    # plt.colorbar()
    # plt.show()

    print("i, ||Vt(i,:)AV(:,i) - I_i||_F, V[:,i] = 1/sqrt(lambda_i) U[:,i]")
    for i in range(1, nvec + 1):
        print(i, np.linalg.norm(err[0:i, 0:i], 'fro'))
def chol_inv(B, lower=True):
    """
    Returns the inverse of matrix A, where A = B*B.T,
    i.e. B is the Cholesky decomposition of A.

    Solves Ax = I given B is the Cholesky factorization of A.
    """
    return cho_solve((B, lower), np.eye(B.shape[0]))
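A minimal check of this pattern with SciPy: factor a symmetric positive definite matrix, then confirm that solving against the identity reproduces the inverse.

import numpy as np
from scipy.linalg import cho_factor, cho_solve

A = np.array([[4., 2.], [2., 3.]])        # symmetric positive definite
B, lower = cho_factor(A, lower=True)
A_inv = cho_solve((B, lower), np.eye(A.shape[0]))
print(np.allclose(A @ A_inv, np.eye(2)))  # True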
def test_theta_0():
    rng.seed(0)
    n_samples = 100
    Y = rng.randn(n_samples, 5)
    X = rng.randn(n_samples, 5)

    sgcrf = SparseGaussianCRF(lamL=0.01, lamT=0.01)
    sgcrf.fit(X, Y)

    assert np.allclose(sgcrf.Lam, np.eye(5), .1, .2)
def init_layers(X, Z, dims, final_mean_function):
    M = Z.shape[0]
    q_mus, q_sqrts, mean_functions, Zs = [], [], [], []
    X_running, Z_running = X.copy(), Z.copy()

    for dim_in, dim_out in zip(dims[:-2], dims[1:-1]):
        if dim_in == dim_out:  # identity for same dims
            W = np.eye(dim_in)
        elif dim_in > dim_out:  # use PCA mf for stepping down
            _, _, V = np.linalg.svd(X_running, full_matrices=False)
            W = V[:dim_out, :].T
        elif dim_in < dim_out:  # identity + pad with zeros for stepping up
            I = np.eye(dim_in)
            zeros = np.zeros((dim_in, dim_out - dim_in))
            W = np.concatenate([I, zeros], 1)

        mean_functions.append(Linear(A=W))
        Zs.append(Z_running.copy())
        q_mus.append(np.zeros((M, dim_out)))
        q_sqrts.append(np.eye(M)[:, :, None] * np.ones((1, 1, dim_out)))

        Z_running = Z_running.dot(W)
        X_running = X_running.dot(W)

    # final layer (as before but no mean function)
    mean_functions.append(final_mean_function)
    Zs.append(Z_running.copy())
    q_mus.append(np.zeros((M, dims[-1])))
    q_sqrts.append(np.eye(M)[:, :, None] * np.ones((1, 1, dims[-1])))

    return q_mus, q_sqrts, Zs, mean_functions
def eye_mask(self, shape):
    """
    Build a mask using np.eye.
    """
    return ~np.eye(*shape, dtype=bool)
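Since np.eye accepts a dtype, ~np.eye(..., dtype=bool) gives a boolean mask of all off-diagonal positions. A minimal sketch of what such a mask selects:

import numpy as np

mask = ~np.eye(3, 3, dtype=bool)   # True everywhere except the diagonal
A = np.arange(9).reshape(3, 3)
print(A[mask])                     # off-diagonal entries: [1 2 3 5 6 7]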
def gl_update_joint_matrices(self, node, parent_joint=None,
                             parent_joint_matrix=numpy.eye(3, 4, dtype=numpy.float32)):
    for child in node.children:
        if child.node_type == j3d.inf1.NodeType.JOINT:
            joint = self.gl_joints[child.index]
            joint_matrix = self.gl_joint_matrices[child.index]
            joint_matrix[:] = joint.create_matrix(parent_joint, parent_joint_matrix)
            self.gl_update_joint_matrices(child, joint, joint_matrix)
        else:
            self.gl_update_joint_matrices(child, parent_joint, parent_joint_matrix)
def KeyGen_test():
    A, S, Aq_bar, S_bar = KeyGen(q=7, n=5, m=7, alpha=1)
    print(Aq_bar)
    print(S_bar)
    print(A)
    print(S)
    print(np.matmul(A, S) - 7 * np.eye(5, dtype=int))
def __init__(self, Y, R=None, t=None, maxIterations=100, gamma=0.1):
    if Y is None:
        raise ValueError('Empty list of point clouds!')

    dimensions = [cloud.shape[1] for cloud in Y]
    if not all(dimension == dimensions[0] for dimension in dimensions):
        raise ValueError('All point clouds must have the same number of dimensions!')

    self.Y = Y
    self.M = [cloud.shape[0] for cloud in self.Y]
    self.D = dimensions[0]

    if R:
        rotations = [rotation.shape for rotation in R]
        if not all(rotation[0] == self.D and rotation[1] == self.D for rotation in rotations):
            raise ValueError('All rotation matrices need to be %d x %d matrices!' % (self.D, self.D))
        self.R = R
    else:
        self.R = [np.eye(self.D) for cloud in Y]

    if t:
        translations = [translation.shape for translation in t]
        if not all(translation[0] == 1 and translation[1] == self.D for translation in translations):
            raise ValueError('All translation vectors need to be 1 x %d matrices!' % self.D)
        self.t = t
    else:
        self.t = [np.atleast_2d(np.zeros((1, self.D))) for cloud in self.Y]