The following 9 code examples, extracted from open-source Python projects, illustrate how to use sklearn.utils.as_float_array().
def _clean_nans(scores):
    """Return a float copy of *scores* with NaNs replaced by the dtype's minimum.

    Useful for feature-selection scores where NaN should rank below every
    finite value.
    """
    cleaned = as_float_array(scores, copy=True)
    nan_mask = np.isnan(cleaned)
    cleaned[nan_mask] = np.finfo(cleaned.dtype).min
    return cleaned
def fit(self, X, y=None):
    """Fit a symmetric whitening transform on X.

    Centers the data, eigendecomposes the (bias-regularized) covariance,
    and stores a symmetric decorrelation matrix in ``self.components_``.
    """
    n_samples, n_features = X.shape
    X = as_float_array(X, copy=self.copy)
    # Center each feature vector; keep the mean for later use.
    self.mean_ = np.mean(X, axis=0)
    X -= self.mean_
    # Regularized covariance: self.bias on the diagonal guards against
    # division by near-zero eigenvalues below.
    covariance = np.dot(X.T, X) / n_samples + self.bias * np.identity(n_features)
    eigenvalues, eigenvectors = eigh(covariance)
    # Symmetric whitening matrix: V diag(1/sqrt(lambda)) V^T
    self.components_ = np.dot(
        eigenvectors * np.sqrt(1.0 / eigenvalues), eigenvectors.T)
    # eigh yields eigenvalues in ascending order; reverse so the explained
    # variance is ordered from greatest to least.
    self.explained_variance_ = eigenvalues[::-1]
    return self
def fit(self, X, y=None, **fit_params):
    """Fits the inverse covariance model according to the given training data
    and parameters.

    Parameters
    -----------
    X : 2D ndarray, shape (n_features, n_features)
        Input data.

    Returns
    -------
    self
    """
    X = check_array(X, ensure_min_features=2, estimator=self)
    # NaN/inf are tolerated here; downstream code is expected to handle them.
    X = as_float_array(X, copy=False, force_all_finite=False)
    self.init_coefs(X)

    # Guard clause: only the QUIC solver is supported.
    if self.method != 'quic':
        raise NotImplementedError(
            "Only method='quic' has been implemented.")

    (self.precision_, self.covariance_, self.opt_, self.cputime_,
     self.iters_, self.duality_gap_) = quic(
        self.sample_covariance_,
        self.lam * self.lam_scale_,
        mode=self.mode,
        tol=self.tol,
        max_iter=self.max_iter,
        Theta0=self.Theta0,
        Sigma0=self.Sigma0,
        path=self.path_,
        msg=self.verbose,
    )
    self.is_fitted = True
    return self
def fit(self, X, y):
    """
    Fit the model using X, y as training data.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape [n_samples, n_features]
        Training vectors, where n_samples is the number of samples
        and n_features is the number of features.

    y : array-like of shape [n_samples, n_outputs]
        Target values (class labels in classification, real numbers in
        regression)

    Returns
    -------
    self : object
        Returns an instance of self.
    """
    # Project the inputs through the (random) hidden layer, then solve a
    # linear regression from those activations to the targets.
    self.hidden_activations_ = self.hidden_layer.fit_transform(X)
    self._fit_regression(as_float_array(y, copy=True))
    return self
def transform(self, X):
    """Coerce X into a DataFrame using the stored index, columns and dtype."""
    # A Series already carries its own index/name: just promote it.
    if isinstance(X, pd.Series):
        return X.to_frame()
    values = check_array(as_float_array(X))
    return pd.DataFrame(values,
                        index=self.index,
                        columns=self.columns,
                        dtype=self.dtype)
def test_as_float_array():
    # Exercise the dtype-promotion and copy semantics of as_float_array.
    data = np.ones((3, 10), dtype=np.int32)
    data = data + np.arange(10, dtype=np.int32)
    # int32 input must come back as float32
    converted = as_float_array(data, copy=False)
    np.testing.assert_equal(converted.dtype, np.float32)
    # int64 input must come back as float64
    data = data.astype(np.int64)
    converted = as_float_array(data, copy=True)
    # a dtype change forces a new array even when copy=False
    assert_true(as_float_array(data, False) is not data)
    np.testing.assert_equal(converted.dtype, np.float64)
    # already-float input with copy=False is returned as-is
    data = np.ones((3, 2), dtype=np.float32)
    assert_true(as_float_array(data, copy=False) is data)
    # Fortran ordering must be preserved through the conversion
    data = np.asfortranarray(data)
    assert_true(np.isfortran(as_float_array(data, copy=True)))
    # copy=True must fully detach the result from the input
    matrices = [
        np.matrix(np.arange(5)),
        sp.csc_matrix(np.arange(5)).toarray(),
        sparse_random_matrix(10, 10, density=0.10).toarray(),
    ]
    for original in matrices:
        detached = as_float_array(original, copy=True)
        detached[0, 0] = np.nan
        assert_false(np.isnan(original).any())
def test_np_matrix():
    # Input validation must never hand an np.matrix back to the caller,
    # whatever array-like it receives.
    data = np.arange(12).reshape(3, 4)
    for wrapped in (data, np.matrix(data), sp.csc_matrix(data)):
        assert_false(isinstance(as_float_array(wrapped), np.matrix))
def test_memmap():
    # Input validation must not silently copy memory-mapped arrays.
    with NamedTemporaryFile(prefix='sklearn-test') as tmp:
        mapped = np.memmap(tmp, shape=(10, 10), dtype=np.float32)
        mapped[:] = 0
        converters = (check_array,
                      np.asarray,
                      lambda x: as_float_array(x, copy=False))
        for convert in converters:
            view = convert(mapped)
            # Writes through the result must be visible in the memmap,
            # proving the data was not copied.
            view[:] = 1
            assert_array_equal(view.ravel(), mapped.ravel())
            view[:] = 0
def fit(self, X, y=None):
    """Estimate the precision using an adaptive maximum likelihood estimator.

    Fits ``self.estimator`` once to derive adaptive penalty weights
    (``self.lam_``), then refits a second-stage estimator with those
    weights applied.

    Parameters
    ----------
    X : ndarray, shape (n_samples, n_features)
        Data from which to compute the proportion matrix.

    Returns
    -------
    self

    Raises
    ------
    NotImplementedError
        If ``self.method`` is not 'binary', 'inverse_squared', or 'inverse'.
    """
    X = check_array(X, ensure_min_features=2, estimator=self)
    X = as_float_array(X, copy=False, force_all_finite=False)
    n_samples, n_features = X.shape

    # First pass: fit the base estimator to derive adaptive weights.
    self.estimator.fit(X)

    if self.method == 'binary':
        self.lam_ = self._binary_weights(self.estimator)
        # Binary weighting uses a fixed (non-cross-validated) second stage.
        self.estimator_ = QuicGraphLasso(
            lam=self.lam_ * self.estimator.lam_,
            mode='default',
            init_method='cov',
            auto_scale=False)
    elif self.method == 'inverse_squared':
        self.lam_ = self._inverse_squared_weights(self.estimator)
        self.estimator_ = QuicGraphLassoCV(
            lam=self.lam_ * self.estimator.lam_,
            auto_scale=False)
    elif self.method == 'inverse':
        self.lam_ = self._inverse_weights(self.estimator)
        self.estimator_ = QuicGraphLassoCV(
            lam=self.lam_ * self.estimator.lam_,
            auto_scale=False)
    else:
        # BUG FIX: the original message was a tuple (a stray comma instead
        # of implicit string concatenation), so the exception text rendered
        # as a Python tuple. Join it into a single string.
        raise NotImplementedError(
            "Only method='binary', 'inverse_squared', or "
            "'inverse' have been implemented.")

    # Second pass: refit with the adaptive weights applied to the penalty.
    self.estimator_.fit(X)
    self.is_fitted = True
    return self