We extracted the following 7 code examples from open-source Python projects to illustrate how to use numpy.asarray_chkfinite(). All examples assume NumPy has been imported as np.
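Before the project examples, here is a minimal sketch of the core behavior: numpy.asarray_chkfinite() converts its input to an ndarray just like numpy.asarray(), but additionally raises ValueError if the input contains NaN or infinity.

import numpy as np

# Converts like np.asarray when every value is finite
a = np.asarray_chkfinite([1.0, 2.0, 3.0])

# Raises ValueError because the input contains a NaN (or an inf)
try:
    np.asarray_chkfinite([1.0, np.nan, 3.0])
except ValueError as err:
    print('rejected non-finite input:', err)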
Example 1

def as1D(x):
    """ Convert input into 1D numpy array.

    Returns
    -------
    x : 1D array

    Examples
    --------
    >>> as1D(5)
    array([5])
    >>> as1D([1,2,3])
    array([1, 2, 3])
    >>> as1D([[3,4,5,6]])
    array([3, 4, 5, 6])
    """
    if not isinstance(x, np.ndarray):
        x = np.asarray_chkfinite(x)
    if x.ndim < 1:
        x = np.asarray_chkfinite([x])
    elif x.ndim > 1:
        x = np.squeeze(x)
    return x
Example 2

def as2D(x):
    """ Convert input into 2D numpy array.

    Returns
    -------
    x : 2D array

    Examples
    --------
    >>> as2D(5)
    array([[5]])
    >>> as2D([1,2,3])
    array([[1, 2, 3]])
    >>> as2D([[3,4,5,6]])
    array([[3, 4, 5, 6]])
    """
    if not isinstance(x, np.ndarray):
        x = np.asarray_chkfinite(x)
    if x.ndim < 1:
        x = np.asarray_chkfinite([x])
    while x.ndim < 2:
        x = x[np.newaxis, :]
    return x
Example 3

def as3D(x):
    """ Convert input into 3D numpy array.

    Returns
    -------
    x : 3D array

    Examples
    --------
    >>> as3D(5)
    array([[[5]]])
    >>> as3D([1,2,3])
    array([[[1, 2, 3]]])
    >>> as3D([[3,4,5,6]])
    array([[[3, 4, 5, 6]]])
    """
    if not isinstance(x, np.ndarray):
        x = np.asarray_chkfinite(x)
    if x.ndim < 1:
        x = np.asarray_chkfinite([x])
    while x.ndim < 3:
        x = x[np.newaxis, :]
    return x
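Because as1D, as2D, and as3D route any non-ndarray input through np.asarray_chkfinite, they reject lists containing NaN or inf. A short usage sketch, assuming the three helpers above are defined and numpy is imported as np:

x = as2D([1, 2, 3])               # ndarray of shape (1, 3)
y = as3D(5)                       # ndarray of shape (1, 1, 1)

try:
    as1D([1.0, float('nan')])     # non-finite list input is rejected
except ValueError:
    print('as1D rejects NaN/inf input')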
Example 4

def toCArray(X, dtype=np.float64):
    """ Convert input into numpy array of C-contiguous order.

    Ensures returned array is aligned and owns its own data,
    not a view of another array.

    Returns
    -------
    X : ND array

    Examples
    --------
    >>> Q = np.zeros(10, dtype=np.int32, order='F')
    >>> toCArray(Q).flags.c_contiguous
    True
    >>> toCArray(Q).dtype.byteorder
    '='
    """
    X = np.asarray_chkfinite(X, dtype=dtype, order='C')
    if X.dtype.byteorder != '=':
        X = X.newbyteorder('=').copy()
    # Copy only when the array is a view or misaligned, so the result
    # owns aligned data (as the asserts below require).
    if not X.flags.owndata or not X.flags.aligned:
        X = X.copy()
    assert X.flags.owndata
    assert X.flags.aligned
    return X
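A usage sketch for toCArray (variable names are illustrative): a Fortran-ordered integer array comes back as a C-contiguous float64 array that owns its data, because np.asarray_chkfinite is called with dtype=np.float64 and order='C'.

Q = np.zeros((2, 3), dtype=np.int32, order='F')
C = toCArray(Q)                 # finite check + cast to float64 in C order
print(C.flags.c_contiguous)     # True
print(C.dtype)                  # float64
print(C.flags.owndata)          # True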
Example 5

def _do_tSSS(clean_data, orig_in_data, resid,
             st_correlation, n_positions, t_str):
    """Compute and apply SSP-like projection vectors based on min corr"""
    # Validation only: raises ValueError if resid contains NaNs or infs;
    # the converted array itself is discarded.
    np.asarray_chkfinite(resid)
    t_proj = _overlap_projector(orig_in_data, resid, st_correlation)
    # Apply projector according to Eq. 12 in [2]_
    msg = (' Projecting %2d intersecting tSSS components '
           'for %s' % (t_proj.shape[1], t_str))
    if n_positions > 1:
        msg += ' (across %2d positions)' % n_positions
    logger.info(msg)
    clean_data -= np.dot(np.dot(clean_data, t_proj), t_proj.T)
Example 6

def check_and_set_idx(ids, idx, prefix):
    """ Reconciles passed-in IDs and indices and returns indices, as well as unique IDs
    in the order specified by the indices.  If only IDs supplied, returns the sort-arg
    as the index.  If only indices supplied, returns None for IDs.  If both supplied,
    checks that the correspondence is unique and returns unique IDs in the sort order
    of the associated index.

    :param np.ndarray ids: array of IDs
    :param np.ndarray[int] idx: array of indices
    :param str prefix: variable name (for error logging)
    :return: unique IDs and indices (passed in or derived from the IDs)
    :rtype: np.ndarray, np.ndarray
    """
    if ids is None and idx is None:
        raise ValueError('Both {}_ids and {}_idx cannot be None'.format(prefix, prefix))
    if ids is None:
        return None, np.asarray_chkfinite(idx)
    if idx is None:
        return np.unique(ids, return_inverse=True)
    else:
        ids = np.asarray(ids)
        idx = np.asarray_chkfinite(idx)
        if len(idx) != len(ids):
            raise ValueError('{}_ids ({}) and {}_idx ({}) must have the same length'.format(
                prefix, len(ids), prefix, len(idx)))
        uniq_idx, idx_sort_index = np.unique(idx, return_index=True)
        # make sure each unique index corresponds to a unique id
        if not all(len(set(ids[idx == i])) == 1 for i in uniq_idx):
            raise ValueError("Each index must correspond to a unique {}_id".format(prefix))
        return ids[idx_sort_index], idx
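A short sketch of how check_and_set_idx behaves when only IDs are given (indices derived via np.unique) versus only indices (IDs come back as None); the array values here are illustrative:

ids = np.array(['s2', 's1', 's2'])
uniq_ids, idx = check_and_set_idx(ids, None, 'student')
# uniq_ids -> array(['s1', 's2']), idx -> array([1, 0, 1])

no_ids, idx_only = check_and_set_idx(None, np.array([0, 1, 0]), 'item')
# no_ids -> None, idx_only -> array([0, 1, 0]) after the finite-value check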
Example 7

def __init__(self, correct, student_ids=None, item_ids=None, student_idx=None,
             item_idx=None, is_held_out=None, num_students=None, num_items=None,
             **bn_learner_kwargs):
    """
    :param np.ndarray[bool] correct: a 1D array of correctness values
    :param np.ndarray|None student_ids: student identifiers for each interaction; if no
        student indices provided, sort order of these ids determines theta indices.
    :param np.ndarray|None item_ids: item identifiers for each interaction; if no item
        indices are provided, sort order of these ids determines item indices.
    :param np.ndarray[int]|None student_idx: a 1D array mapping `correct` to student index
    :param np.ndarray[int]|None item_idx: a 1D array mapping `correct` to item index
    :param np.ndarray[bool] is_held_out: a 1D array indicating whether the interaction
        should be held out from training (if not all zeros, a held_out test node will be
        added to learner)
    :param int|None num_students: optional number of students. Default is one plus the
        maximum index.
    :param int|None num_items: optional number of items. Default is one plus the
        maximum index.
    :param bn_learner_kwargs: arguments to be passed on to the BayesNetLearner init
    """
    # convert pandas Series to np.ndarray and check argument dimensions
    correct = np.asarray_chkfinite(correct, dtype=bool)
    student_ids, student_idx = check_and_set_idx(student_ids, student_idx, 'student')
    item_ids, item_idx = check_and_set_idx(item_ids, item_idx, 'item')

    if len(correct) != len(student_idx) or len(correct) != len(item_idx):
        raise ValueError("number of elements in correct ({}), student_idx ({}), and "
                         "item_idx ({}) must be the same".format(
                             len(correct), len(student_idx), len(item_idx)))
    if is_held_out is not None and (
            len(is_held_out) != len(correct) or is_held_out.dtype != bool):
        raise ValueError("held_out ({}) must be None or an array of bools the same "
                         "length as correct ({})".format(len(is_held_out), len(correct)))

    self.num_students = set_or_check_min(num_students, np.max(student_idx) + 1,
                                         'num_students')
    self.num_items = set_or_check_min(num_items, np.max(item_idx) + 1, 'num_items')

    theta_node = DefaultGaussianNode(THETAS_KEY, self.num_students, ids=student_ids)
    offset_node = DefaultGaussianNode(OFFSET_COEFFS_KEY, self.num_items, ids=item_ids)
    nodes = [theta_node, offset_node]

    # add response nodes (train/test if there is held-out data; else just the train set)
    if is_held_out is not None and np.sum(is_held_out):
        if np.sum(is_held_out) == len(is_held_out):
            raise ValueError("some interactions must be not held out")
        is_held_out = np.asarray_chkfinite(is_held_out, dtype=bool)
        node_names = (TRAIN_RESPONSES_KEY, TEST_RESPONSES_KEY)
        response_idxs = (np.logical_not(is_held_out), is_held_out)
    else:
        node_names = (TRAIN_RESPONSES_KEY,)
        response_idxs = (np.ones_like(correct, dtype=bool),)
    for node_name, response_idx in zip(node_names, response_idxs):
        cpd = OnePOCPD(item_idx=item_idx[response_idx],
                       theta_idx=student_idx[response_idx],
                       num_thetas=self.num_students, num_items=self.num_items)
        param_nodes = {THETAS_KEY: theta_node, OFFSET_COEFFS_KEY: offset_node}
        nodes.append(Node(name=node_name, data=correct[response_idx], cpd=cpd,
                          solver_pars=SolverPars(learn=False), param_nodes=param_nodes,
                          held_out=(node_name == TEST_RESPONSES_KEY)))

    # store leaf nodes for learning
    super(OnePOLearner, self).__init__(nodes=nodes, **bn_learner_kwargs)