The following are 50 code examples extracted from open source Python projects, illustrating how to use numpy.array_str().
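As a quick orientation before the project code, here is a minimal, self-contained sketch (our own illustration, not taken from any of the projects below) of what numpy.array_str() returns and how its keyword arguments behave; the output comments are indicative, not exact:

import numpy as np

a = np.array([[1.0, 1e-12], [0.123456789, 100.0]])

# array_str() formats only the data part of the array, without the
# "array(...)" wrapper and dtype information that repr()/np.array_repr() add.
print(np.array_str(a))

# precision caps the number of printed digits; suppress_small prints
# values close to zero as 0. instead of scientific notation.
print(np.array_str(a, precision=3, suppress_small=True))
# e.g. [[  1.      0.   ]
#       [  0.123 100.   ]]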
def iter_tango_logs(directory, logs, topics=[]):
    for log in logs:
        directory = os.path.expanduser(os.path.join(args.directory, log))
        print('Accessing Tango directory {:}'.format(directory))
        dataset = TangoLogReader(directory=directory, scale=im_scale)
        for item in dataset.iterframes(topics=topics):
            bboxes = item.bboxes
            targets = item.coords

            # # If RGB_VIO, RGB, RGB_VIO in stream, then interpolate pose
            # # b/w the 1st and 3rd timestamps to match RGB timestamps
            # if len(self.__item_q) >= 3 and \
            #         self.__item_q[-1][0] == self.__item_q[-3][0] == 1 and \
            #         self.__item_q[-2][0] == 0:
            #     t1, t2, t3 = self.__item_q[-3][1], self.__item_q[-2][1], self.__item_q[-1][1]
            #     w2, w1 = np.float32([t2-t1, t3-t2]) / (t3-t1)
            #     p1, p3 = self.__item_q[-3][2], self.__item_q[-1][2]
            #     p2 = p1.interpolate(p3, w1)
            #     self.on_frame(t2, t2, p2, self.__item_q[-2][2])
            #     print np.array_str(np.float64([t1, t2, t3]) * 1e-14, precision=6, suppress_small=True), \
            #         (t2-t1) * 1e-6, (t3-t2) * 1e-6, w1, w2, p2
def array_str(arr, max_line_width=None, precision=None, suppress_small=None):
    """Returns the string representation of the content of an array.

    Args:
        arr (array_like): Input array. It should be able to feed to
            :func:`cupy.asnumpy`.
        max_line_width (int): The maximum length of a printed line.
        precision (int): Floating point precision. It uses the current
            printing precision of NumPy.
        suppress_small (bool): If ``True``, very small numbers are printed
            as zeros.

    .. seealso:: :func:`numpy.array_str`

    """
    return numpy.array_str(cupy.asnumpy(arr), max_line_width, precision,
                           suppress_small)
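A hedged usage sketch for the wrapper above (assumes a working CuPy installation; the printed output is indicative only):

x = cupy.arange(6, dtype=cupy.float32).reshape(2, 3) / 3
print(array_str(x, precision=3, suppress_small=True))
# e.g. [[0.    0.333 0.667]
#       [1.    1.333 1.667]]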
def insert_data(self, tablename, clmn):
    lenr = len(clmn)
    if lenr == 0:
        print('go')
        return
    lenc = len(clmn[0])
    sqlline = ''
    for indr in range(lenr):
        # print(np.array_str(clmn[indr]).replace('[','').replace(']','').replace(' ',','))
        sqlline = np.array_str(clmn[indr]).replace('[', '').replace(']', '').replace(' ', ',')
        sql = 'insert into ' + tablename + ' VALUES (' + sqlline + ');'
        # print(sql)
        try:
            # print('insert into '+tablename+' VALUES ('+ sqlline+');')
            self.cur.execute(sql)
        except Exception as e:
            print('err:', e)
        sqlline = ''
    return
def predict():
    # get data from drawing canvas and save as image
    parseImage(request.get_data())

    # read parsed image back in 8-bit, black and white mode (L)
    x = imread('output.png', mode='L')
    x = np.invert(x)
    x = imresize(x, (28, 28))

    # reshape image data for use in neural network
    x = x.reshape(1, 28, 28, 1)
    with graph.as_default():
        out = model.predict(x)
        print(out)
        print(np.argmax(out, axis=1))
        response = np.array_str(np.argmax(out, axis=1))
        return response
def __str__(self):
    s  = '=======================================\n'
    s += 'classname        : %s\n' % self.__class__.__name__
    s += 'sample rate      : %.1f [Hz]\n' % self.fs
    s += 'channels         : %i\n' % self.ch
    s += 'duration         : %.3f [s]\n' % self.duration
    s += 'datatype         : %s\n' % self.samples.dtype
    s += 'samples per ch   : %i\n' % self.nofsamples
    s += 'data size        : %.3f [Mb]\n' % (self.samples.nbytes / (1024 * 1024))
    s += 'has comment      : %s\n' % ('yes' if len(self._comment) != 0 else 'no')
    if self.ch != 0:
        s += '-----------------:---------------------\n'
        s += 'peak             : %s\n' % np.array_str(self.peak()[0],
                                                      precision=4, suppress_small=True)
        s += 'RMS              : %s\n' % np.array_str(self.rms(),
                                                      precision=4, suppress_small=True)
        s += 'crestfactor      : %s\n' % np.array_str(self.crest_factor(),
                                                      precision=4, suppress_small=True)
    s += '-----------------:---------------------\n'
    return s
def predictAisMeasurements(self, scanTime, aisMeasurements):
    import pymht.models.pv as model
    import pymht.utils.kalman as kalman
    assert len(aisMeasurements) > 0
    aisPredictions = AisMessageList(scanTime)
    scanTimeString = datetime.datetime.fromtimestamp(scanTime).strftime("%H:%M:%S.%f")
    for measurement in aisMeasurements:
        aisTimeString = datetime.datetime.fromtimestamp(measurement.time).strftime("%H:%M:%S.%f")
        log.debug("Predicting AIS (" + str(measurement.mmsi) + ") from " +
                  aisTimeString + " to " + scanTimeString)
        dT = scanTime - measurement.time
        assert dT >= 0
        state = measurement.state
        A = model.Phi(dT)
        Q = model.Q(dT)
        x_bar, P_bar = kalman.predict(A, Q,
                                      np.array(state, ndmin=2),
                                      np.array(measurement.covariance, ndmin=3))
        aisPredictions.measurements.append(
            AIS_prediction(model.C_RADAR.dot(x_bar[0]),
                           model.C_RADAR.dot(P_bar[0]).dot(model.C_RADAR.T),
                           measurement.mmsi))
        log.debug(np.array_str(state) + "=>" + np.array_str(x_bar[0]))
        aisPredictions.aisMessages.append(measurement)
    assert len(aisPredictions.measurements) == len(aisMeasurements)
    return aisPredictions
def get_moving_average(x, n, type_str):
    # compute an n period moving average.
    # type is 'simple' | 'exponential'
    my_list = []
    x = np.asarray(x)
    if type_str == 'simple':
        weights = np.ones(n)
    elif type_str == 'exponential':
        weights = np.exp(np.linspace(-1., 0., n))
    elif type_str == 'weight':
        weights = np.flipud(np.arange(1, n + 1, dtype=float))
    weights /= weights.sum()
    a = np.convolve(x, weights, mode='full')[:len(x)]
    a[:n] = a[n]
    for i in range(0, len(a), 1):
        my_list.append(np.array_str(a[i]))
    return my_list
def __repr__(self):
    s = super().__repr__()
    s += f'Chains num: {self.chains_num}\n'
    s += f'Batch size: {self.batch_size}\n'
    s += f'Position size: {self.position_size}\n'
    s += f'Precisions: noise = {self.noise_precision}, weights = {self.weights_precision}\n'
    s += f'Resample precision: noise = {self.resample_noise_precision}, '
    s += f'weights = {self.resample_weights_precision}\n'
    s += f'Burn in: {self.burn_in}\n'
    s += f'Seek step sizes: {self.seek_step_sizes}\n'
    s += f'Anneal step sizes: {self.anneal_step_sizes}\n'
    s += f'Fade in velocities: {self.fade_in_velocities}\n'
    s += 'Step sizes: {}\n'.format(np.array_str(self.step_sizes).replace('\n', ''))
    s += 'Step probabilities: {}\n'.format(np.array_str(self.step_probabilities).replace('\n', ''))
    return s
def Sign(**kwargs):
    '''
    Algorithm 1, Pg 12 of BLISS paper
    o/p: z, c
    '''
    msg, A, S, m, n, sd, q, M, kappa = kwargs['msg'], kwargs['A'], kwargs['S'], \
        kwargs['m'], kwargs['n'], kwargs['sd'], kwargs['q'], kwargs['M'], kwargs['kappa']
    m_bar = m + n
    D = DiscreteGaussianDistributionLatticeSampler(ZZ**m_bar, sd)
    count = 0
    while True:
        y = np.array(D())  # m' x 1
        reduced_Ay = util.vector_to_Zq(np.matmul(A, y), 2 * q)
        # still not the hash but this is test run
        c = hash_iterative(np.array_str(reduced_Ay) + msg, n, kappa)
        b = util.crypt_secure_randint(0, 1)
        Sc = np.matmul(S, c)
        z = y + ((-1)**b) * Sc
        try:
            exp_term = exp(float(Sc.dot(Sc)) / (2 * sd**2))
            cosh_term = np.cosh(float(z.dot(Sc)) / (sd**2))
            val = exp_term / (cosh_term * M)
        except OverflowError:
            print("OF")
            continue
        if random.random() < min(val, 1.0):
            break
        if count > 10:
            # beyond 4 rejection sampling iterations are not expected in general
            raise ValueError("The number of rejection sampling iterations are more than expected")
        count += 1
    return z, c
def Verify(**kwargs):
    msg, A, m, n, sd, q, eta, z, c, kappa = kwargs['msg'], kwargs['A'], kwargs['m'], \
        kwargs['n'], kwargs['sd'], kwargs['q'], kwargs['eta'], kwargs['z'], kwargs['c'], kwargs['kappa']
    B2 = eta * sd * np.sqrt(m)
    reduced_prod = util.vector_to_Zq(np.matmul(A, z) + q * c, 2 * q)
    # print np.sqrt(z.dot(z)), B2
    # print LA.norm(z, np.inf), float(q)/4
    if np.sqrt(z.dot(z)) > B2 or LA.norm(z, np.inf) >= float(q) / 4:
        return False
    if np.array_equal(c, hash_iterative(np.array_str(reduced_prod) + msg, n, kappa)):
        return True
    return False
def hash_to_baseb(matrix, message, b, k):
    '''
    i/p:
    matrix  : numpy array to be hashed
    message : string that the sender sends

    o/p:
    list with k elements each b/w 0 to b-1
    '''
    # returns a string with 128 hex digits
    hexval = hl.sha512((np.array_str(matrix) + message).encode()).hexdigest()
    # returns first k digits from hexval in a list; this list of symbols
    # allows conversion of numbers represented until base 36
    return np.array(list(map(int, b2b(hexval, 16, b)[:k])))
def _nn_pose_fill(valid):
    """
    Looks up closest True for each False and returns
    indices for fill-in-lookup
    In:  [True, False, True, ..., False, True]
    Out: [0, 0, 2, ..., 212, 212]
    """
    valid_inds, = np.where(valid)
    invalid_inds, = np.where(~valid)

    all_inds = np.arange(len(valid))
    all_inds[invalid_inds] = -1

    for j in range(10):
        fwd_inds = valid_inds + j
        bwd_inds = valid_inds - j

        # Forward fill
        invalid_inds, = np.where(all_inds < 0)
        fwd_fill_inds = np.intersect1d(fwd_inds, invalid_inds)
        all_inds[fwd_fill_inds] = all_inds[fwd_fill_inds - j]

        # Backward fill
        invalid_inds, = np.where(all_inds < 0)
        if not len(invalid_inds):
            break
        bwd_fill_inds = np.intersect1d(bwd_inds, invalid_inds)
        all_inds[bwd_fill_inds] = all_inds[bwd_fill_inds + j]

        # Check if any missing
        invalid_inds, = np.where(all_inds < 0)
        if not len(invalid_inds):
            break

    # np.set_printoptions(threshold=np.nan)
    # print valid.astype(np.int)
    # print np.array_str(all_inds)
    # print np.where(all_inds < 0)

    return all_inds
def __repr__(self):
    return 'rpy (rxyz): %s tvec: %s' % \
        (np.array_str(self.quat.to_rpy(axes='rxyz'), precision=2, suppress_small=True),
         np.array_str(self.tvec, precision=2, suppress_small=True))
    # return 'quat: %s, tvec: %s' % (self.quat, self.tvec)
def __repr__(self):
    return 'real: %s dual: %s' % \
        (np.array_str(self.real, precision=2, suppress_small=True),
         np.array_str(self.dual, precision=2, suppress_small=True))
def __repr__(self):
    return 'Pose ID: %i, rpy (rxyz): %s tvec: %s' % \
        (self.id,
         np.array_str(self.quat.to_rpy(axes='rxyz'), precision=2),
         np.array_str(self.tvec, precision=2))
def __str__(self):
    return 'Metadata : ' + ' '.join(map(str, self.metadata)) + '\n' + \
        "Pose : " + "\n" + np.array_str(self.pose)
def predict():
    # whenever the predict method is called, we're going
    # to input the user drawn character as an image into the model,
    # perform inference, and return the classification

    # get the raw data format of the image
    imgData = request.get_data()
    # encode it into a suitable format
    convertImage(imgData)
    print("debug")
    # read the image into memory
    x = imread('output.png', mode='L')
    # compute a bit-wise inversion so black becomes white and vice versa
    x = np.invert(x)
    # make it the right size
    x = imresize(x, (28, 28))
    # imshow(x)
    # convert to a 4D tensor to feed into our model
    x = x.reshape(1, 28, 28, 1)
    print("debug2")
    # in our computation graph
    with graph.as_default():
        # perform the prediction
        out = model.predict(x)
        print(out)
        print(np.argmax(out, axis=1))
        print("debug3")
        # convert the response to a string
        response = np.array_str(np.argmax(out, axis=1))
        return response
def analysis(N, M, I, L, sqrt=False):
    """
    Conduct a Kalman filter validation experiment. Output
    results (concerning the error, i.e., x_hat - x) for the
    last time step only. If *sqrt* use the square root form
    Kalman filter.
    """
    sim = setup_random_test(N, M, I, L)
    if sqrt:
        post = sqrt_kf_sim(sim)
    else:
        post = kf_sim(sim)
    # output statistics of \hat{x}_{I|I}
    error_I = []
    for l in range(sim['L']):
        error_I.append(post[l]['error'][-1])
    E_I = NP.stack(error_I, 1)
    E_I_mean = NP.mean(E_I, 1)
    P_I = NP.cov(E_I)
    print('Mean of error at time step I={}'.format(I))
    for E_I_mean_n in E_I_mean:
        print('{:9.2e}'.format(E_I_mean_n))
    print('')
    print('True posterior covariance at time step I')
    print(NP.array_str(post['P'][-1], precision=2))
    print('')
    print('Empirical posterior covariance at time step I')
    print(NP.array_str(P_I, precision=2))
    return sim, post
def debug_embeddingd(model, when, logger):
    embeddings_tensor = tflearn.variables.get_layer_variables_by_name('embedding')[0]
    w = model.get_weights(embeddings_tensor)
    for line in w:
        logger.log(np.array_str(line), logname=when, maxlogs=10)
def PrintParams(self, lat, lon):
    sn, se, su = ct2lg(self.sin[0:self.frequencies],
                       self.sin[self.frequencies:self.frequencies * 2],
                       self.sin[self.frequencies * 2:self.frequencies * 3],
                       lat, lon)
    cn, ce, cu = ct2lg(self.cos[0:self.frequencies],
                       self.cos[self.frequencies:self.frequencies * 2],
                       self.cos[self.frequencies * 2:self.frequencies * 3],
                       lat, lon)

    # calculate the amplitude of the components
    an = np.sqrt(np.square(sn) + np.square(cn))
    ae = np.sqrt(np.square(se) + np.square(ce))
    au = np.sqrt(np.square(su) + np.square(cu))

    return 'Periodic amp [annual semi] N: %s E: %s U: %s [mm]' % (
        np.array_str(an * 1000.0, precision=1),
        np.array_str(ae * 1000.0, precision=1),
        np.array_str(au * 1000.0, precision=1))
def test_array_str_64bit(self, level=rlevel):
    # Ticket #501
    s = np.array([1, np.nan], dtype=np.float64)
    with np.errstate(all='raise'):
        np.array_str(s)  # Should succeed
def test_array_str(self):
    a = testing.shaped_arange((2, 3, 4), cupy)
    b = testing.shaped_arange((2, 3, 4), numpy)
    self.assertEqual(cupy.array_str(a), numpy.array_str(b))
def get_label(self, output, min_accuracy=0.6):
    counts = np.bincount(output)
    accuracy = (np.amax(counts) / output.shape[0])
    if self.verbose:
        self.message += "\nCounts: %s" % np.array_str(counts)
        self.message += "\nAccuracy: %.2f%%" % (accuracy * 100)
    if accuracy < min_accuracy:
        self.message += "\nWho the fuck are you"
    else:
        self.message += "\nThe user is %s" % self.users[np.argmax(counts)]
    return self.message

# set for delta mode
def write_pose_pair_to_csv_line(self, pose_pair_idx):
    singular_values = self.singular_values[pose_pair_idx]
    return "{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}\n".format(
        self.algorithm_name,
        pose_pair_idx,
        self.iteration_num,
        self.prefiltering_enabled,
        self.dataset_names[pose_pair_idx][0],
        self.dataset_names[pose_pair_idx][1],
        self.success[pose_pair_idx],
        self.rmse[pose_pair_idx][0],
        self.rmse[pose_pair_idx][1],
        self.num_inliers[pose_pair_idx],
        self.num_initial_poses[pose_pair_idx],
        self.num_poses_kept[pose_pair_idx],
        self.runtimes[pose_pair_idx],
        self.loop_error_position,
        self.loop_error_orientation,
        ("" if singular_values is None else np.array_str(
            singular_values, max_line_width=1000000)),
        self.bad_singular_value[pose_pair_idx],
        self.optimization_enabled,
        self.optimization_success[pose_pair_idx],
        self.optimization_runtime[pose_pair_idx],
        self.spoiled_initial_guess_angle_offset[pose_pair_idx],
        (None if self.spoiled_initial_guess_translation_offset[pose_pair_idx] is None
         else np.array_str(
             self.spoiled_initial_guess_translation_offset[pose_pair_idx],
             max_line_width=1000000)),
        self.spoiled_initial_guess_time_offset[pose_pair_idx])
def binary_superset(n, indices):
    superset = np.zeros(max(indices) + 1, dtype=int)
    print(list(bin(n)[2:]))
    superset[indices] = list(bin(n)[2:])
    print(superset)
    # np.array_str(superset) yields e.g. "[1 0 1]"; slicing [1:-1:2] keeps
    # every other character between the brackets, i.e. the digits "101"
    return int(np.array_str(superset)[1:-1:2], 2)
def __init__(self, volume, shape, label_margin=None):
    self.volume = volume
    self.shape = shape
    self.margin = np.floor_divide(self.shape, 2).astype(np.int64)
    if label_margin is None:
        label_margin = np.zeros(3, dtype=np.int64)
    self.label_margin = label_margin
    self.skip_blank_sections = True
    self.ctr_min = self.margin
    self.ctr_max = (np.array(self.volume.shape) - self.margin - 1).astype(np.int64)
    self.random = np.random.RandomState(CONFIG.random_seed)

    # If the volume has a mask channel, further limit ctr_min and
    # ctr_max to lie inside a margin in the AABB of the mask.
    if self.volume.mask_data is not None:
        mask_min, mask_max = self.volume.mask_bounds
        mask_min = self.volume.local_coord_to_world(mask_min)
        mask_max = self.volume.local_coord_to_world(mask_max)
        self.ctr_min = np.maximum(self.ctr_min, mask_min + self.label_margin)
        self.ctr_max = np.minimum(self.ctr_max, mask_max - self.label_margin - 1)

    if np.any(self.ctr_min >= self.ctr_max):
        raise ValueError('Cannot generate subvolume bounds: bounds ({}, {}) too small for shape ({})'.format(
            np.array_str(self.ctr_min), np.array_str(self.ctr_max),
            np.array_str(self.shape)))
def get_largest_component(self, closing_shape=None):
    mask, bounds = self._get_bounded_mask(closing_shape)

    label_im, num_labels = ndimage.label(mask)
    label_sizes = ndimage.sum(mask, label_im, range(num_labels + 1))
    label_im[(label_sizes < label_sizes.max())[label_im]] = 0
    label_im = np.minimum(label_im, 1)

    if label_im[tuple(self.seed - bounds[0])] == 0:
        logging.warning('Seed voxel ({}) is not in connected component.'.format(
            np.array_str(self.seed)))

    return label_im, bounds
def get_seeded_component(self, closing_shape=None):
    mask, bounds = self._get_bounded_mask(closing_shape)

    label_im, _ = ndimage.label(mask)
    seed_label = label_im[tuple(self.seed - bounds[0])]
    if seed_label == 0:
        raise ValueError('Seed voxel ({}) is not in body.'.format(
            np.array_str(self.seed)))
    label_im[label_im != seed_label] = 0
    label_im[label_im == seed_label] = 1

    return label_im, bounds
def to_swc(self, filename):
    component, bounds = self.get_largest_component(
        closing_shape=CONFIG.postprocessing.closing_shape)
    print('Skeleton is within {}, {}'.format(
        np.array_str(bounds[0]), np.array_str(bounds[1])))
    skel = skeletonize_component(component)
    swc = skeleton_to_swc(skel, bounds[0], CONFIG.volume.resolution)
    with open(filename, 'w') as swcfile:
        writer = csv.writer(swcfile, delimiter=' ', quoting=csv.QUOTE_NONE)
        writer.writerows(swc)
def __str__(self):
    s = 'Column names: ' + ', '.join(self.column_names) + "\n"
    s += 'Data: ' + "\n"
    s += np.array_str(self.get_all_columns())
    return s
def _gmmModel(self, model_path):
    '''
    load gmmModel
    :return:
    '''
    for state in list(set(self.transcription)):
        pkl_file = open(os.path.join(model_path, state + '.pkl'), 'rb')
        self.gmmModel[state] = pickle.load(pkl_file)
        pkl_file.close()

    # with open('/Users/gong/desktop/original.txt', 'wb') as f:
    #     for key in self.gmmModel:
    #         f.write(np.array_str(self.gmmModel[key].covars_))
def _print(self, tensor_values):
    if not tensor_values:
        return
    for k, v in tensor_values.items():
        tf.logging.info("%s: %s", k, np.array_str(v))
def _batch_print(self, tensor_values):
    if not tensor_values:
        return
    batch_size = list(tensor_values.values())[0].shape[0]
    for i in range(min(self._first_k, batch_size)):
        for k, v in tensor_values.items():
            tf.logging.info("%s: %s", k, np.array_str(v[i]))
def __str__(self):
    return 'ANN model\nNodes: %u' % (self.nodes) + \
        '\nOpenCL:\n ' + str(self.openCL.devList) + \
        '\nwHL:\n' + np.array_str(self.weights.wHL) + \
        '\nbHL:\n' + np.array_str(self.weights.bHL) + \
        '\nwOL:\n' + np.array_str(self.weights.wOL) + \
        '\nbOL:\n' + np.array_str(self.weights.bOL)
def pretty_string_samples(self, idx_start=0, idx_end=20, precision=4, header=False):
    s = ''
    if header:
        t = '  '
        u = 'ch'
        for i in range(self.ch):
            t += '-------:'
            u += '  %2i   :' % (i + 1)
        t += '\n'
        u += '\n'
        s += t  # -------:-------:-------:
        s += u  # ch  1  :   2   :   3   :
        s += t  # -------:-------:-------:
    s += np.array_str(self.samples[idx_start:idx_end, :],
                      max_line_width=260,  # we can print 32 channels before linewrap
                      precision=precision,
                      suppress_small=True)
    if (idx_end - idx_start) < self.nofsamples:
        s = s[:-1]  # strip the right ']' character
        s += '\n ...,\n'
        lastlines = np.array_str(self.samples[-3:, :],
                                 max_line_width=260,
                                 precision=precision,
                                 suppress_small=True)
        s += ' %s\n' % lastlines[1:]  # strip first '['
    return s
def convert_list_of_ints_to_string(array_of_ints):
    return re.sub(r'\s+', ',', np.array_str(array_of_ints).strip('[]'))
def convert_list_of_ints_to_string(list_of_ints):
    return re.sub(r'\s+', ',', np.array_str(list_of_ints).strip('[]'))
def convert_array_of_ints_to_string(array_of_ints):
    return np.array_str(array_of_ints).strip('[]')
def __analyzeTrackTermination(self):
    deadTracks = []
    for trackIndex, trackNode in enumerate(self.__trackNodes__):
        # Check outside radarRange
        if trackNode.isOutsideRange(self.position, self.radarRange):
            trackNode.status = outofrangeTag
            deadTracks.append(trackIndex)
            log.info("Terminating track {0:} at {1:} since it is out of radarRange".format(
                trackIndex,
                np.array_str(self.__trackNodes__[trackIndex].x_0[0:2])))
        # Check if track is too insecure
        elif trackNode.getScore() / (self.N + 1) > self.scoreUpperLimit:
            trackNode.status = toolowscoreTag
            deadTracks.append(trackIndex)
            log.info("Terminating track {0:} at {1:} since its score is above the threshold ({2:.1f}>{3:.1f})".format(
                trackIndex,
                np.array_str(self.__trackNodes__[trackIndex].x_0[0:2]),
                trackNode.getScore() / (self.N + 1),
                self.scoreUpperLimit))
        elif trackNode.cumulativeNLLR > self.clnnrUpperLimit:
            trackNode.status = toolowscoreTag
            deadTracks.append(trackIndex)
            log.info(
                "Terminating track {0:} at {1:} since its CNNLR is above the threshold ({2:.1f}>{3:.1f})".format(
                    trackIndex,
                    np.array_str(self.__trackNodes__[trackIndex].x_0[0:2]),
                    trackNode.cumulativeNLLR,
                    self.clnnrUpperLimit))
    return deadTracks
def __str__(self):
    mmsiString = 'MMSI: ' + str(self.mmsi) if self.mmsi is not None else ""
    stateString = np.array_str(self.state, precision=1)
    covarianceString = 'Covariance diagonal: ' + np.array_str(
        np.diagonal(self.covariance), precision=1, suppress_small=True)
    return stateString + " " + covarianceString + " " + mmsiString
def __str__(self):
    return np.array_str(self.mat)
def log_completion(self, record):
    """Record a completed evaluation to the log.

    :param record: Record of the function evaluation
    """
    xstr = np.array_str(record.params[0], max_line_width=np.inf,
                        precision=5, suppress_small=True)
    logger.info("Feasible {:.3e} @ {}".format(record.value, xstr))
def log_completion(self, record, penalty):
    """Record a completed evaluation to the log.

    :param record: Record of the function evaluation
    :param penalty: Penalty for the given point
    """
    xstr = np.array_str(record.params[0], max_line_width=np.inf,
                        precision=5, suppress_small=True)
    feas = "Feasible"
    if penalty > 0.0:
        feas = "Infeasible"
    # logger.info("{} {:.3e} @ {}".format(feas, record.value + penalty, xstr))
    logger.info("{} {:.3e} @ {}".format(feas, record.value, xstr))