We extracted the following 50 code examples from open-source Python projects to illustrate how to use numpy.asanyarray().
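As background for the examples below: numpy.asanyarray behaves like numpy.asarray, except that ndarray subclasses such as numpy.ma.MaskedArray are passed through unchanged instead of being converted to a plain ndarray. A minimal sketch of the difference:

import numpy as np
import numpy.ma as ma

masked = ma.masked_array([1, 2, 3], mask=[False, True, False])

# asarray converts to a base ndarray, dropping the subclass (and the mask with it)
print(type(np.asarray(masked)))     # <class 'numpy.ndarray'>

# asanyarray passes the MaskedArray subclass through intact
print(type(np.asanyarray(masked)))  # <class 'numpy.ma.core.MaskedArray'>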
def roll_zeropad(a, shift, axis=None):
    a = np.asanyarray(a)
    if shift == 0:
        return a
    if axis is None:
        n = a.size
        reshape = True
    else:
        n = a.shape[axis]
        reshape = False
    if np.abs(shift) > n:
        res = np.zeros_like(a)
    elif shift < 0:
        shift += n
        zeros = np.zeros_like(a.take(np.arange(n-shift), axis))
        res = np.concatenate((a.take(np.arange(n-shift, n), axis), zeros), axis)
    else:
        zeros = np.zeros_like(a.take(np.arange(n-shift, n), axis))
        res = np.concatenate((zeros, a.take(np.arange(n-shift), axis)), axis)
    if reshape:
        return res.reshape(a.shape)
    else:
        return res
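A quick usage sketch of the function above: unlike np.roll, elements shifted off one end are discarded and zeros are shifted in, rather than wrapping around:

import numpy as np

a = np.array([1, 2, 3, 4, 5])
print(roll_zeropad(a, 2))   # [0 0 1 2 3]  zeros enter on the left
print(roll_zeropad(a, -2))  # [3 4 5 0 0]  zeros enter on the right
print(np.roll(a, 2))        # [4 5 1 2 3]  np.roll wraps around instead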
def mean_absolute_percentage_error(y_true, y_pred):
    """
    Use of this metric is not recommended; for illustration only.
    See other regression metrics on sklearn docs:
    http://scikit-learn.org/stable/modules/classes.html#regression-metrics

    Use like any other metric:
    >>> y_true = [3, -0.5, 2, 7]; y_pred = [2.5, -0.3, 2, 8]
    >>> mean_absolute_percentage_error(y_true, y_pred)
    Out[]: 24.791666666666668
    """
    y_true = np.asanyarray(y_true)
    y_pred = np.asanyarray(y_pred)
    assert_all_finite(y_true)
    assert_all_finite(y_pred)

    # Filter zero values in y_true
    sel = (y_true != 0)
    y_true = y_true[sel]
    y_pred = y_pred[sel]

    ## Note: does not handle mix 1d representation
    #if _is_1d(y_true):
    #    y_true, y_pred = _check_1d_array(y_true, y_pred)
    #return np.abs((y_true - y_pred) / y_true.astype(np.float32)).sum() / float(district_num * dateslot_num)
    return np.mean(np.abs((y_true - y_pred) / y_true.astype(np.float32)))
def compressed(x):
    """
    Return all the non-masked data as a 1-D array.

    This function is equivalent to calling the "compressed" method of a
    `MaskedArray`, see `MaskedArray.compressed` for details.

    See Also
    --------
    MaskedArray.compressed
        Equivalent method.

    """
    if not isinstance(x, MaskedArray):
        x = asanyarray(x)
    return x.compressed()
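For instance, using numpy's masked-array machinery directly (independent of the module the function above comes from):

import numpy as np
import numpy.ma as ma

x = ma.masked_array([[1, 2], [3, 4]], mask=[[False, True], [False, False]])
print(x.compressed())            # [1 3 4]  masked entries dropped, flattened to 1-D
print(ma.compressed([1, 2, 3]))  # [1 2 3]  plain input is converted first; nothing is masked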
def example_of_aggregating_sim_matrix(raw_data, labels, num_subjects, num_epochs_per_subj):
    # aggregate the kernel matrix to save memory
    svm_clf = svm.SVC(kernel='precomputed', shrinking=False, C=1)
    clf = Classifier(svm_clf, num_processed_voxels=1000, epochs_per_subj=num_epochs_per_subj)
    rearranged_data = raw_data[num_epochs_per_subj:] + raw_data[0:num_epochs_per_subj]
    rearranged_labels = labels[num_epochs_per_subj:] + labels[0:num_epochs_per_subj]
    clf.fit(list(zip(rearranged_data, rearranged_data)), rearranged_labels,
            num_training_samples=num_epochs_per_subj*(num_subjects-1))
    predict = clf.predict()
    print(predict)
    print(clf.decision_function())
    test_labels = labels[0:num_epochs_per_subj]
    incorrect_predict = hamming(predict, np.asanyarray(test_labels)) * num_epochs_per_subj
    logger.info(
        'when aggregating the similarity matrix to save memory, '
        'the accuracy is %d / %d = %.2f' %
        (num_epochs_per_subj-incorrect_predict, num_epochs_per_subj,
         (num_epochs_per_subj-incorrect_predict) * 1.0 / num_epochs_per_subj)
    )
    # when the kernel matrix is computed in portion, the test data is already in
    print(clf.score(None, test_labels))
def example_of_correlating_two_components(raw_data, raw_data2, labels,
                                          num_subjects, num_epochs_per_subj):
    # aggregate the kernel matrix to save memory
    svm_clf = svm.SVC(kernel='precomputed', shrinking=False, C=1)
    clf = Classifier(svm_clf, epochs_per_subj=num_epochs_per_subj)
    num_training_samples = num_epochs_per_subj*(num_subjects-1)
    clf.fit(list(zip(raw_data[0:num_training_samples], raw_data2[0:num_training_samples])),
            labels[0:num_training_samples])
    X = list(zip(raw_data[num_training_samples:], raw_data2[num_training_samples:]))
    predict = clf.predict(X)
    print(predict)
    print(clf.decision_function(X))
    test_labels = labels[num_training_samples:]
    incorrect_predict = hamming(predict, np.asanyarray(test_labels)) * num_epochs_per_subj
    logger.info(
        'when aggregating the similarity matrix to save memory, '
        'the accuracy is %d / %d = %.2f' %
        (num_epochs_per_subj-incorrect_predict, num_epochs_per_subj,
         (num_epochs_per_subj-incorrect_predict) * 1.0 / num_epochs_per_subj)
    )
    # when the kernel matrix is computed in portion, the test data is already in
    print(clf.score(X, test_labels))
def example_of_correlating_two_components_aggregating_sim_matrix(raw_data, raw_data2, labels,
                                                                 num_subjects, num_epochs_per_subj):
    # aggregate the kernel matrix to save memory
    svm_clf = svm.SVC(kernel='precomputed', shrinking=False, C=1)
    clf = Classifier(svm_clf, num_processed_voxels=1000, epochs_per_subj=num_epochs_per_subj)
    num_training_samples = num_epochs_per_subj*(num_subjects-1)
    clf.fit(list(zip(raw_data, raw_data2)), labels,
            num_training_samples=num_training_samples)
    predict = clf.predict()
    print(predict)
    print(clf.decision_function())
    test_labels = labels[num_training_samples:]
    incorrect_predict = hamming(predict, np.asanyarray(test_labels)) * num_epochs_per_subj
    logger.info(
        'when aggregating the similarity matrix to save memory, '
        'the accuracy is %d / %d = %.2f' %
        (num_epochs_per_subj-incorrect_predict, num_epochs_per_subj,
         (num_epochs_per_subj-incorrect_predict) * 1.0 / num_epochs_per_subj)
    )
    # when the kernel matrix is computed in portion, the test data is already in
    print(clf.score(None, test_labels))

# python3 classification.py face_scene bet.nii.gz face_scene/prefrontal_top_mask.nii.gz face_scene/fs_epoch_labels.npy
def _fit(self, X, y, warm_start=None):
    if warm_start is None:
        self.coef_ = np.zeros(X.shape[1])
    else:
        self.coef_ = np.asanyarray(warm_start)

    l1l2_proximal = l1l2_regularization
    self.coef_, self.niter_ = l1l2_proximal(X, y,
                                            self.mu, self.tau,
                                            beta=self.coef_,
                                            kmax=self.max_iter,
                                            tolerance=self.tol,
                                            return_iterations=True,
                                            adaptive=self.adaptive_step_size)

    if self.niter_ == self.max_iter:
        warnings.warn('Objective did not converge, you might want'
                      ' to increase the number of iterations')
    return self
def fit(self, X, y, *args, **kwargs):
    X = np.asanyarray(X)
    y = np.asanyarray(y)

    # Centering Data
    X, y, X_offset, y_offset, X_scale, precompute, Xy = \
        _pre_fit(X, y, None, self.precompute, self.normalize,
                 self.fit_intercept, copy=False)

    # Calling the class-specific train method
    self._fit(X, y, *args, **kwargs)

    # Fitting the intercept if required
    self._set_intercept(X_offset, y_offset, X_scale)

    self._trained = True
    return self
def fit(self, X, y):
    X = np.asanyarray(X)
    y = np.asanyarray(y)

    # Selection
    self.selector.fit(X, y)
    self.selected_ = (np.abs(self.selector.coef_) >= self.threshold)

    # Final Estimation
    self.estimator.fit(X[:, self.selected_], y)

    # Coefficients
    self.coef_ = np.zeros_like(self.selector.coef_)
    self.coef_[self.selected_] = self.estimator.coef_
    self.intercept_ = self.estimator.intercept_
    return self
def rmse(predictions, targets):
    """Calculate the root mean squared error of an array of predictions.

    Parameters
    ----------
    predictions : array_like
        Array of predicted values.
    targets : array_like
        Array of target values.

    Returns
    -------
    rmse : float
        Root mean squared error of the predictions wrt the targets.
    """
    predictions = np.asanyarray(predictions)
    targets = np.asanyarray(targets)
    rmse = np.sqrt(np.nanmean((predictions - targets) ** 2))
    return rmse
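A short check of the function above; np.nanmean makes it ignore NaN pairs rather than propagate them:

import numpy as np

print(rmse([1.0, 2.0, 3.0], [1.0, 2.0, 3.0]))     # 0.0
print(rmse([1.0, 2.0, 3.0], [2.0, 3.0, 4.0]))     # 1.0
print(rmse([1.0, np.nan, 3.0], [2.0, 9.0, 4.0]))  # 1.0  the NaN pair is skipped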
def __getitem__(self, *idx):
    """TuningCurve1D index access. Accepts integers, slices, and lists."""
    idx = [ii for ii in idx]
    if len(idx) == 1 and not isinstance(idx[0], int):
        idx = idx[0]
    if isinstance(idx, tuple):
        idx = [ii for ii in idx]

    if self.isempty:
        return self
    try:
        out = copy.copy(self)
        out._ratemap = self.ratemap[idx, :]
        out._unit_ids = (np.asanyarray(out._unit_ids)[idx]).tolist()
        out._unit_labels = (np.asanyarray(out._unit_labels)[idx]).tolist()
        return out
    except Exception:
        raise TypeError(
            'unsupported subscripting type {}'.format(type(idx)))
def ReadNextBatch():
    '''Read the next (grey, color) batch and compute the chrominance
    (AB colorspace values) of the color images.

    Return:
        GreyImages_Batch: list with all greyscale images [Batch_size, 224, 224, 1]
        ColorImages_Batch: list with all colored images [Batch_size, colored images]
    '''
    global GreyImages_Batch
    global ColorImages_Batch
    global CurrentBatch_indx
    global Batch_size

    GreyImages_Batch = []
    ColorImages_Batch = []
    for ind in range(Batch_size):
        Colored_img = Image.open(ColorImgsPath + str(CurrentBatch_indx) + '.png')
        ColorImages_Batch.append(Colored_img)
        Grey_img = Image.open(GreyImgsPath + str(CurrentBatch_indx) + '.png')
        Grey_img = np.asanyarray(Grey_img)
        img_shape = Grey_img.shape
        img_reshaped = Grey_img.reshape(img_shape[0], img_shape[1], GreyChannels)  # [224,224,1]
        GreyImages_Batch.append(img_reshaped)  # [#imgs,224,224,1]
        CurrentBatch_indx = CurrentBatch_indx + 1
    Get_Batch_Chrominance()
    return GreyImages_Batch
def periodogram(self, attr):
    is_equispaced = self.data.time_delta is not None
    if is_equispaced:
        x = np.ravel(self.data.interp(attr))
        periods, pgram = periodogram_equispaced(x)
        # TODO: convert periods into time_values-relative values, i.e.
        # periods *= self.data.time_delta; like lombscargle already does
        # periods *= self.data.time_delta
    else:
        times = np.asanyarray(self.data.time_values, dtype=float)
        x = np.ravel(self.data[:, attr])
        # Since lombscargle works with explicit times,
        # we can skip any nan values
        nonnan = ~np.isnan(x)
        if not nonnan.all():
            x, times = x[nonnan], times[nonnan]
        periods, pgram = periodogram_nonequispaced(times, x)
    return periods, pgram
def gauss(mu, sigma, n_obs=50, batch_size=1, random_state=None):
    """Sample the 1-D Gaussian distribution.

    Parameters
    ----------
    mu : float, array_like
    sigma : float, array_like
    n_obs : int, optional
    batch_size : int, optional
    random_state : np.random.RandomState, optional

    Returns
    -------
    array_like
        1-D observations.
    """
    # Transforming the arrays' shape to be compatible with batching.
    batches_mu = np.asanyarray(mu).reshape((-1, 1))
    batches_sigma = np.asanyarray(sigma).reshape((-1, 1))

    # Sampling observations.
    y_obs = ss.norm.rvs(loc=batches_mu, scale=batches_sigma,
                        size=(batch_size, n_obs), random_state=random_state)
    return y_obs
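Called with scalar parameters the function above yields one batch of n_obs draws; array parameters broadcast to one row per batch. A usage sketch, assuming scipy.stats is imported as ss the way the example expects:

import numpy as np
import scipy.stats as ss

y = gauss(mu=0.0, sigma=1.0, random_state=np.random.RandomState(0))
print(y.shape)  # (1, 50)

y = gauss(mu=[0.0, 5.0], sigma=[1.0, 0.1], n_obs=10, batch_size=2,
          random_state=np.random.RandomState(0))
print(y.shape)  # (2, 10)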
def _unnormalized_loglikelihood(self, x):
    x = np.asanyarray(x)
    ndim = x.ndim
    x = x.reshape((-1, self.dim))

    logpdf = -np.ones(len(x)) * np.inf

    logi = self._within_bounds(x)
    x = x[logi, :]
    if len(x) == 0:
        if ndim == 0 or (ndim == 1 and self.dim > 1):
            logpdf = logpdf[0]
        return logpdf

    mean, var = self.model.predict(x)
    logpdf[logi] = ss.norm.logcdf(self.threshold, mean, np.sqrt(var)).squeeze()

    if ndim == 0 or (ndim == 1 and self.dim > 1):
        logpdf = logpdf[0]
    return logpdf
def _add_noise(self, x):
    # Add noise for more efficient fitting of GP
    if self.noise_var is not None:
        noise_var = np.asanyarray(self.noise_var)
        if noise_var.ndim == 0:
            noise_var = np.tile(noise_var, self.model.input_dim)
        for i in range(self.model.input_dim):
            std = np.sqrt(noise_var[i])
            if std == 0:
                continue
            xi = x[:, i]
            a = (self.model.bounds[i][0] - xi) / std
            b = (self.model.bounds[i][1] - xi) / std
            x[:, i] = ss.truncnorm.rvs(
                a, b, loc=xi, scale=std, size=len(x),
                random_state=self.random_state)
    return x
def evaluate(graph, mels, label, mapping):
    """ Check correctness of a file classification """
    logging.info('Evaluating audio classification')

    audio_feature = np.asanyarray(list(mels.flatten()), dtype=np.float32)
    true_result = mapping[label]

    x = graph.get_tensor_by_name('prefix/input:0')
    y = graph.get_tensor_by_name('prefix/softmax_tensor:0')

    with tf.Session(graph=graph) as sess:
        # Note: we didn't initialize/restore anything, everything is stored in the graph_def
        y_out = sess.run(y, feed_dict={x: [audio_feature]})
        logging.info('true value:' + str(true_result))
        logging.info('predicted value:' + str(y_out[0].argmax()))
        logging.info('predictions:' + str(y_out))
        if y_out[0].argmax() == true_result:
            return True
        else:
            return False
def toFloat(self, arr, toFloat=True, forceFloat64=False):
    if hasattr(self, 'preferences'):
        p = self.preferences
        toFloat = p.pToFloat.value()
        forceFloat64 = p.pForceFloat64.value()
    if not toFloat:
        return arr
    arr = np.asanyarray(arr)
    try:
        if forceFloat64:
            dtype = np.float64
        else:
            # float16 is just too coarse and causes nans and infs
            dtype = {np.dtype('uint8'): np.float32,
                     np.dtype('uint16'): np.float32,
                     np.dtype('uint32'): np.float64,
                     np.dtype('uint64'): np.float64}[arr.dtype]
        return arr.astype(dtype, copy=False)
    except KeyError:
        return arr
def agent_start(self, observation):
    # Initialize State
    self.state = observation
    state_ = np.asanyarray(self.state, dtype=np.float32)

    # Generate an Action e-greedy
    action, Q_now = self.DQN.e_greedy(state_, self.epsilon)
    self.Q_recent = Q_now[0]

    # Update for next step
    self.lastAction = action
    self.last_state = self.state.copy()
    self.last_observation = observation.copy()
    self.max_Q_list.append(np.max(self.Q_recent))

    return action
def agent_start(self, observation):
    # Initialize State
    self.state = observation
    state_ = cuda.to_gpu(np.asanyarray(self.state, dtype=np.float32), self.gpu_id)

    # Generate an Action e-greedy
    action, Q_now = self.DQN.e_greedy(state_, self.epsilon)
    self.Q_recent = Q_now.get()[0]

    # Update for next step
    self.lastAction = action
    self.last_state = self.state.copy()
    self.last_observation = observation.copy()
    self.max_Q_list.append(np.max(self.Q_recent))

    return action
def find(a, n=None, d=None, nargout=1):
    if d:
        raise NotImplementedError

    # there is no promise that nonzero or flatnonzero
    # use or will use indexing of the argument without
    # converting it to array first. So we use asarray
    # instead of asanyarray
    if nargout == 1:
        i = np.flatnonzero(np.asarray(a)).reshape(1, -1) + 1
        if n is not None:
            i = i.take(n)
        return matlabarray(i)
    if nargout == 2:
        i, j = np.nonzero(np.asarray(a))
        if n is not None:
            i = i.take(n)
            j = j.take(n)
        return (matlabarray((i+1).reshape(-1, 1)),
                matlabarray((j+1).reshape(-1, 1)))
    raise NotImplementedError
def all_correlations_fast_no_scipy(y, X):
    '''
    Cs = all_correlations(y, X)
    Cs[i] = np.corrcoef(y, X[i])[0,1]
    '''
    X = np.asanyarray(X, float)
    y = np.asanyarray(y, float)
    xy = np.dot(X, y)
    y_ = y.mean()
    ys_ = y.std()
    x_ = X.mean(1)
    xs_ = X.std(1)
    n = float(len(y))
    ys_ += 1e-5  # Handle zeros in ys
    xs_ += 1e-5  # Handle zeros in x

    return (xy - x_ * y_ * n) / n / xs_ / ys_
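A sanity check against the naive np.corrcoef loop the docstring describes (the 1e-5 guard terms introduce a tiny bias, hence the loose tolerance):

import numpy as np

rng = np.random.RandomState(42)
y = rng.randn(100)
X = rng.randn(20, 100)

fast = all_correlations_fast_no_scipy(y, X)
slow = np.array([np.corrcoef(y, row)[0, 1] for row in X])
print(np.allclose(fast, slow, atol=1e-3))  # True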
def _dateToISO(indict):
    """
    Convert datetimes to ISO strings inside of datamodel attributes
    """
    retdict = dmcopy(indict)
    if isinstance(indict, dict):
        for key in indict:
            if isinstance(indict[key], datetime.datetime):
                retdict[key] = retdict[key].isoformat()
            elif hasattr(indict[key], '__iter__'):
                for idx, el in enumerate(indict[key]):
                    if isinstance(el, datetime.datetime):
                        retdict[key][idx] = el.isoformat()
    else:
        if isinstance(indict, datetime.datetime):
            retdict = retdict.isoformat()
        elif hasattr(indict, '__iter__'):
            retdict = numpy.asanyarray(indict)
            for idx, el in numpy.ndenumerate(indict):
                if isinstance(el, datetime.datetime):
                    retdict[idx] = el.isoformat()
    return retdict
def interweave(a, b):
    """
    Given two array-like variables, interweave them together.

    Discussed here: http://stackoverflow.com/questions/5347065/interweaving-two-numpy-arrays

    Parameters
    ==========
    a : array-like
        first array
    b : array-like
        second array

    Returns
    =======
    out : numpy.ndarray
        interweaved array
    """
    a = np.asanyarray(a)
    b = np.asanyarray(b)
    ans = np.empty((a.size + b.size), dtype=a.dtype)
    ans[0::2] = a
    ans[1::2] = b
    return ans
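For example:

import numpy as np

a = np.array([1, 3, 5])
b = np.array([2, 4, 6])
print(interweave(a, b))  # [1 2 3 4 5 6]

Note that the output buffer takes its dtype from a, so the inputs should have compatible types.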
def take(self, indexer, axis=1, verify=True, convert=True):
    """
    Take items along any axis.
    """
    self._consolidate_inplace()
    indexer = (np.arange(indexer.start, indexer.stop, indexer.step,
                         dtype='int64')
               if isinstance(indexer, slice)
               else np.asanyarray(indexer, dtype='int64'))

    n = self.shape[axis]
    if convert:
        indexer = maybe_convert_indices(indexer, n)

    if verify:
        if ((indexer == -1) | (indexer >= n)).any():
            raise Exception('Indices must be nonzero and less than '
                            'the axis length')

    new_labels = self.axes[axis].take(indexer)
    return self.reindex_indexer(new_axis=new_labels, indexer=indexer,
                                axis=axis, allow_dups=True)
def agent_start(self, observation):
    # Initialize State
    self.state = observation
    state_ = cuda.to_gpu(np.asanyarray(self.state, dtype=np.float32), self.gpu_id)

    # Generate an Action e-greedy
    action, Q_now = self.DQN.e_greedy(state_, self.epsilon)

    # Update for next step
    self.lastAction = action
    self.last_state = self.state.copy()
    self.last_observation = observation.copy()
    self.max_Q_list.append(np.max(Q_now.get()))

    return action
def agent_start(self, observation):
    # Preprocess
    tmp = np.bitwise_and(np.asarray(observation.intArray[128:]).reshape([210, 160]),
                         0b0001111)  # Get Intensity from the observation
    obs_array = (spm.imresize(tmp, (110, 84)))[110-84-8:110-8, :]  # Scaling

    # Initialize State
    self.state = np.zeros((4, 84, 84), dtype=np.uint8)
    self.state[0] = obs_array
    state_ = cuda.to_gpu(np.asanyarray(self.state.reshape(1, 4, 84, 84), dtype=np.float32))

    # Generate an Action e-greedy
    returnAction = Action()
    action, Q_now = self.DDQN.e_greedy(state_, self.epsilon)
    returnAction.intArray = [action]

    # Update for next step
    self.lastAction = copy.deepcopy(returnAction)
    self.last_state = self.state.copy()
    self.last_observation = obs_array

    return returnAction
def start(self, x=None):
    if x is None:
        x = Tensor.context

    obs_array = x.content.data
    #print "sum", obs_array.sum()

    # Initialize State
    self.state = np.zeros((self.func.hist_size, self.image_feature_dim), dtype=np.uint8)
    self.state[0] = obs_array
    state_ = np.asanyarray(self.state.reshape(1, self.func.hist_size, self.image_feature_dim),
                           dtype=np.float32)
    if Deel.gpu >= 0:
        state_ = cuda.to_gpu(state_)

    # Generate an Action e-greedy
    action, Q_now = self.func.e_greedy(state_, self.epsilon)
    returnAction = action

    # Update for next step
    self.lastAction = copy.deepcopy(returnAction)
    self.last_state = self.state.copy()
    self.last_observation = obs_array

    return returnAction
def crop(self, doy, depth, lat, lon, var):
    """ Crop a subset of the dataset for each var

        Given doy, depth, lat and lon, it returns the smallest subset
        that still contains the requested coordinates inside it.
        It handles special cases like a region around Greenwich and
        the international date line.

        Accepts 0 to 360 and -180 to 180 longitude references.

        It extends time and longitude coordinates to simplify the use
        of series. For example, a ship track can be requested with a
        longitude sequence like [352, 358, 364, 369, 380], and the
        equivalent for day of year above 365.
    """
    dims, idx = cropIndices(self.dims, lat, lon, depth, doy)
    subset = {}
    for v in var:
        subset[v] = ma.asanyarray([
            self.ncs[tnn][v][0, idx['zn'], idx['yn'], idx['xn']]
            for tnn in idx['tn']])
    return subset, dims
def __getitem__(self, item):
    """ t, z, y, x
    """
    tn, zn, yn, xn = item
    #if type(zn) is not slice:
    #    zn = slice(zn, zn+1)
    #zn_an = slice(zn.start, min(64, zn.stop), zn.step)
    #zn_sa = slice(zn.start, min(55, zn.stop), zn.step)
    output = []
    d = 2 * np.pi * (np.arange(1, 367)[tn]) / 366
    for t in np.atleast_1d(d):
        tmp = self.nc['mean'][:, yn, xn]
        tmp[:64] += self.nc['an_cos'][:, yn, xn] * np.cos(t) + \
                    self.nc['an_sin'][:, yn, xn] * np.sin(t)
        tmp[:55] += self.nc['sa_cos'][:, yn, xn] * np.cos(2*t) + \
                    self.nc['sa_sin'][:, yn, xn] * np.sin(2*t)
        output.append(tmp[zn])
    return ma.asanyarray(output)
def rotation_2D_to_3D(matrix_2D):
    '''
    Given a 2D homogeneous rotation matrix, convert it to a 3D rotation
    matrix that is rotating around the Z axis

    Arguments
    ----------
    matrix_2D: (3,3) float, homogeneous 2D rotation matrix

    Returns
    ----------
    matrix_3D: (4,4) float, homogeneous 3D rotation matrix
    '''
    matrix_2D = np.asanyarray(matrix_2D)
    if matrix_2D.shape != (3, 3):
        raise ValueError('Homogeneous 2D transformation matrix required!')

    matrix_3D = np.eye(4)
    # translation
    matrix_3D[0:2, 3] = matrix_2D[0:2, 2]
    # rotation from 2D to around Z
    matrix_3D[0:2, 0:2] = matrix_2D[0:2, 0:2]

    return matrix_3D
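A quick demonstration; the 2x2 rotation block below is the standard planar rotation, not part of the original source:

import numpy as np

theta = np.radians(90)
c, s = np.cos(theta), np.sin(theta)
# homogeneous 2D transform: rotate 90 degrees, then translate by (1, 2)
matrix_2D = np.array([[c, -s, 1.0],
                      [s,  c, 2.0],
                      [0., 0., 1.0]])

matrix_3D = rotation_2D_to_3D(matrix_2D)
point = np.array([1.0, 0.0, 0.0, 1.0])  # homogeneous 3D point
print(matrix_3D.dot(point))             # [1. 3. 0. 1.]  rotated about Z, translated in XY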
def unique_ordered(data):
    '''
    Returns the same as np.unique, but ordered as per the first
    occurrence of the unique value in data.

    Example
    ---------
    In [1]: a = [0, 3, 3, 4, 1, 3, 0, 3, 2, 1]

    In [2]: np.unique(a)
    Out[2]: array([0, 1, 2, 3, 4])

    In [3]: trimesh.grouping.unique_ordered(a)
    Out[3]: array([0, 3, 4, 1, 2])
    '''
    data = np.asanyarray(data)
    order = np.sort(np.unique(data, return_index=True)[1])
    result = data[order]
    return result
def rgba(colors, dtype=None):
    '''
    Convert a set of RGB colors to RGBA colors.

    Arguments
    ----------
    colors: (n,[3|4]) set of RGB or RGBA colors

    Returns
    ----------
    colors: (n,4) set of RGBA colors
    '''
    if not is_sequence(colors):
        return
    if dtype is None:
        dtype = COLOR_DTYPE
    colors = np.asanyarray(colors, dtype=dtype)
    if is_shape(colors, (-1, 3)):
        opaque = (2**(np.dtype(dtype).itemsize * 8)) - 1
        colors = np.column_stack((colors,
                                  opaque * np.ones(len(colors)))).astype(dtype)
    return colors
def lines_to_path(lines):
    '''
    Given a set of line segments (n, 2, [2|3]), populate a path
    '''
    lines = np.asanyarray(lines)
    if is_shape(lines, (-1, (2, 3))):
        result = {'entities': np.array([Line(np.arange(len(lines)))]),
                  'vertices': lines}
        return result
    elif is_shape(lines, (-1, 2, (2, 3))):
        entities = [Line([i, i+1]) for i in range(0, (lines.shape[0]*2) - 1, 2)]
        vertices = lines.reshape((-1, lines.shape[2]))
        result = {'entities': entities,
                  'vertices': vertices}
    else:
        raise ValueError('Lines must be (n,(2|3)) or (n,2,(2|3))')
    return result
def resample_spline(points, smooth=.001, count=None):
    from scipy.interpolate import splprep, splev
    if count is None:
        count = len(points)
    points = np.asanyarray(points)
    closed = np.linalg.norm(points[0] - points[-1]) < tol.merge

    tpl = splprep(points.T, s=smooth)[0]
    i = np.linspace(0.0, 1.0, count)
    resampled = np.column_stack(splev(i, tpl))

    if closed:
        shared = resampled[[0, -1]].mean(axis=0)
        resampled[0] = shared
        resampled[-1] = shared

    return resampled
def points_to_spline_entity(points, smooth=.0005, count=None):
    from scipy.interpolate import splprep

    if count is None:
        count = len(points)
    points = np.asanyarray(points)
    closed = np.linalg.norm(points[0] - points[-1]) < tol.merge

    knots, control, degree = splprep(points.T, s=smooth)[0]
    control = np.transpose(control)
    index = np.arange(len(control))

    if closed:
        control[0] = control[[0, -1]].mean(axis=0)
        control = control[:-1]
        index[-1] = index[0]

    entity = BSpline(points=index,
                     knots=knots,
                     closed=closed)

    return entity, control
def zero_pad(data, count, right=True):
    '''
    Arguments
    --------
    data: (n) length 1D array
    count: int

    Returns
    --------
    padded: (count) length 1D array if (n < count), otherwise length (n)
    '''
    if len(data) == 0:
        return np.zeros(count)
    elif len(data) < count:
        padded = np.zeros(count)
        if right:
            padded[-len(data):] = data
        else:
            padded[:len(data)] = data
        return padded
    else:
        return np.asanyarray(data)
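For example (note np.zeros gives a float result whenever padding occurs):

import numpy as np

print(zero_pad(np.array([1, 2, 3]), 5))               # [0. 0. 1. 2. 3.]  data kept at the right
print(zero_pad(np.array([1, 2, 3]), 5, right=False))  # [1. 2. 3. 0. 0.]
print(zero_pad(np.array([1, 2, 3]), 2))               # [1 2 3]  already long enough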
def transform_points(points, matrix, translate=True):
    '''
    Returns points, rotated by transformation matrix
    If points is (n,2), matrix must be (3,3)
    If points is (n,3), matrix must be (4,4)

    Arguments
    ----------
    points: (n, 2|3) set of points
    matrix: (3|4, 3|4) rotation matrix
    translate: boolean, apply translation from matrix or not
    '''
    points = np.asanyarray(points)
    dimension = points.shape[1]
    column = np.zeros(len(points)) + int(bool(translate))
    stacked = np.column_stack((points, column))
    transformed = np.dot(matrix, stacked.T).T[:, 0:dimension]
    return transformed
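A usage sketch for the 3D case; with translate=False the appended homogeneous coordinate is zero, so only the rotational part of the matrix acts:

import numpy as np

# rotate 90 degrees about Z, then translate by (10, 0, 0)
matrix = np.array([[0., -1., 0., 10.],
                   [1.,  0., 0.,  0.],
                   [0.,  0., 1.,  0.],
                   [0.,  0., 0.,  1.]])
points = np.array([[1., 0., 0.]])

print(transform_points(points, matrix))                   # [[10.  1.  0.]]
print(transform_points(points, matrix, translate=False))  # [[0. 1. 0.]]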
def plot_points(points, show=True):
    import matplotlib.pyplot as plt
    points = np.asanyarray(points)
    dimension = points.shape[1]
    if dimension == 3:
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        ax.scatter(*points.T)
    elif dimension == 2:
        plt.scatter(*points.T)
    else:
        raise ValueError('Points must be 2D or 3D, not %dD' % dimension)

    if show:
        plt.show()
def contains_points(mesh, points):
    '''
    Check if a mesh contains a set of points, using ray tests.

    If the point is on the surface of the mesh, behavior is undefined.

    Arguments
    ---------
    mesh: Trimesh object
    points: (n,3) points in space

    Returns
    ---------
    contains: (n) boolean array, whether point is inside mesh or not
    '''
    points = np.asanyarray(points)
    vector = unitize([0, 0, 1])
    rays = np.column_stack((points,
                            np.tile(vector, (len(points), 1)))).reshape((-1, 2, 3))
    hits = mesh.ray.intersects_location(rays)
    hits_count = np.array([len(i) for i in hits])
    contains = np.mod(hits_count, 2) == 1

    return contains
def calculate_bin_widths(edges):
    """
    Calculate the widths of wavelength bins given their edges.

    Parameters
    ----------
    edges : array_like
        Sequence of bin edges. Must be 1D and have at least two values.

    Returns
    -------
    widths : ndarray
        Array of bin widths. Will be 1D and have one less value than
        ``edges``.
    """
    edges = np.asanyarray(edges)
    if edges.ndim != 1:
        raise ValueError('edges input array must be 1D.')
    if edges.size < 2:
        raise ValueError('edges input must have at least two values.')
    return edges[1:] - edges[:-1]
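For example:

import numpy as np

edges = np.array([1000., 1001., 1003., 1006.])  # wavelength bin edges
print(calculate_bin_widths(edges))              # [1. 2. 3.]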
def zscore(a, axis=0, ddof=0):
    a = np.asanyarray(a)
    mns = a.mean(axis=axis)
    sstd = a.std(axis=axis, ddof=ddof)
    if axis and mns.ndim < a.ndim:
        res = ((a - np.expand_dims(mns, axis=axis)) /
               np.expand_dims(sstd, axis=axis))
    else:
        res = (a - mns) / sstd
    return np.nan_to_num(res)
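A quick check: with the default axis=0, each column is standardized to zero mean and unit standard deviation:

import numpy as np

a = np.array([[1., 2.],
              [3., 4.],
              [5., 6.]])
z = zscore(a)
print(np.allclose(z.mean(axis=0), 0))  # True
print(np.allclose(z.std(axis=0), 1))   # True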
def _assert_all_finite(X):
    """Like assert_all_finite, but only for ndarray."""
    X = np.asanyarray(X)
    # First try an O(n) time, O(1) space solution for the common case that
    # everything is finite; fall back to O(n) space np.isfinite to prevent
    # false positives from overflow in sum method.
    if (X.dtype.char in np.typecodes['AllFloat']
            and not np.isfinite(X.sum())
            and not np.isfinite(X).all()):
        raise ValueError("Input contains NaN, infinity"
                         " or a value too large for %r." % X.dtype)
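For example, the fast-path check raises only when a non-finite value is actually present:

import numpy as np

_assert_all_finite(np.array([1.0, 2.0]))  # passes silently
try:
    _assert_all_finite(np.array([1.0, np.inf]))
except ValueError as err:
    print(err)  # Input contains NaN, infinity or a value too large for dtype('float64').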
def _prepare_colorarray(arr):
    """Check the shape of the array and convert it to
    floating point representation.
    """
    arr = np.asanyarray(arr)

    if arr.ndim not in [3, 4] or arr.shape[-1] != 3:
        msg = ("the input array must have a shape == (.., ..,[ ..,] 3), "
               "got (" + (", ".join(map(str, arr.shape))) + ")")
        raise ValueError(msg)

    return dtype.img_as_float(arr)
def cum_returns_final(returns, starting_value=0):
    """
    Compute total returns from simple returns.

    Parameters
    ----------
    returns : pd.Series or np.ndarray
        Returns of the strategy as a percentage, noncumulative.
         - Time series with decimal returns.
         - Example:
            2015-07-16   -0.012143
            2015-07-17    0.045350
            2015-07-20    0.030957
            2015-07-21    0.004902
    starting_value : float, optional
        The starting returns.

    Returns
    -------
    float
    """
    if len(returns) == 0:
        return np.nan

    return cum_returns(np.asanyarray(returns),
                       starting_value=starting_value)[-1]
def annual_return(returns, period=DAILY, annualization=None):
    """Determines the mean annual growth rate of returns.

    Parameters
    ----------
    returns : pd.Series or np.ndarray
        Periodic returns of the strategy, noncumulative.
        - See full explanation in :func:`~empyrical.stats.cum_returns`.
    period : str, optional
        Defines the periodicity of the 'returns' data for purposes of
        annualizing. Value ignored if `annualization` parameter is specified.
        Defaults are:
            'monthly': 12
            'weekly': 52
            'daily': 252
    annualization : int, optional
        Used to suppress default values available in `period` to convert
        returns into annual returns. Value should be the annual frequency of
        `returns`.

    Returns
    -------
    float
        Annual Return as CAGR (Compounded Annual Growth Rate).
    """
    if len(returns) < 1:
        return np.nan

    ann_factor = annualization_factor(period, annualization)
    num_years = float(len(returns)) / ann_factor
    start_value = 100
    # Pass array to ensure index -1 looks up successfully.
    end_value = cum_returns(np.asanyarray(returns),
                            starting_value=start_value)[-1]
    cum_returns_final = (end_value - start_value) / start_value
    annual_return = (1. + cum_returns_final) ** (1. / num_years) - 1

    return annual_return
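The CAGR arithmetic can be checked by hand with a pure-numpy restatement of the formula (this does not call the empyrical helpers used above): 252 daily returns of 0.1% span exactly one year at daily annualization, so the annualized figure equals the cumulative one:

import numpy as np

returns = np.full(252, 0.001)     # one year of 0.1% daily returns
num_years = len(returns) / 252.0  # = 1.0

total_growth = np.prod(1.0 + returns)           # compounded growth factor
cagr = total_growth ** (1.0 / num_years) - 1.0
print(round(cagr, 4))                           # 0.2864, about 28.6% per year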
def stability_of_timeseries(returns):
    """Determines R-squared of a linear fit to the cumulative
    log returns. Computes an ordinary least squares linear fit,
    and returns R-squared.

    Parameters
    ----------
    returns : pd.Series or np.ndarray
        Daily returns of the strategy, noncumulative.
        - See full explanation in :func:`~empyrical.stats.cum_returns`.

    Returns
    -------
    float
        R-squared.
    """
    if len(returns) < 2:
        return np.nan

    returns = np.asanyarray(returns)
    returns = returns[~np.isnan(returns)]

    cum_log_returns = np.log1p(returns).cumsum()
    rhat = stats.linregress(np.arange(len(cum_log_returns)),
                            cum_log_returns)[2]

    return rhat ** 2
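A quick check, assuming scipy's stats module is imported as the example expects: a constant return stream gives perfectly linear cumulative log returns, hence an R-squared of (numerically) 1:

import numpy as np
from scipy import stats

print(stability_of_timeseries(np.full(100, 0.01)))  # ~1.0  cumulative log returns form a straight line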