我们从 Python 开源项目中提取了以下 50 个代码示例,用于说明如何使用 numpy.array2string()。
def export_collada(mesh):
    '''
    Export a mesh as a COLLADA file.

    Parameters
    ----------
    mesh : object
        Mesh exposing ``vertices``, ``faces`` and ``vertex_normals`` arrays.

    Returns
    -------
    str
        The COLLADA document produced by filling the template.
    '''
    from ..templates import get_template
    from string import Template

    template = Template(get_template('collada.dae.template'))

    # np.array2string honors the global print options, so we need
    # untruncated, fixed-precision output while formatting.  Save and
    # restore the options instead of clobbering them for the whole
    # process (the original leaked threshold/precision/linewidth).
    saved_options = np.get_printoptions()
    np.set_printoptions(threshold=np.inf, precision=5, linewidth=np.inf)
    try:
        replacement = dict()
        # [1:-1] strips the surrounding '[' ']' from the array repr
        replacement['VERTEX'] = np.array2string(mesh.vertices.reshape(-1))[1:-1]
        replacement['FACES'] = np.array2string(mesh.faces.reshape(-1))[1:-1]
        replacement['NORMALS'] = np.array2string(mesh.vertex_normals.reshape(-1))[1:-1]
        replacement['VCOUNT'] = str(len(mesh.vertices))
        replacement['VCOUNTX3'] = str(len(mesh.vertices) * 3)
        replacement['FCOUNT'] = str(len(mesh.faces))
    finally:
        np.set_printoptions(**saved_options)
    return template.substitute(replacement)
def var_label(var, precision=3):
    """Return label of variable node."""
    if var.name is not None:
        return var.name
    if not isinstance(var, gof.Constant):
        return type_to_str(var.type)
    # Constants are labeled by their (possibly truncated) printed value.
    data = np.asarray(var.data)
    is_scalar = data.ndim == 0
    if is_scalar:
        data = np.array([data])
    label = np.array2string(data, precision=precision)
    # Keep only the first line of a multi-line rendering.
    newline_at = label.find('\n')
    if newline_at != -1:
        label = label[:newline_at]
    if is_scalar:
        # Drop the brackets added by wrapping the scalar in an array.
        label = label.replace('[', '').replace(']', '')
    return label
def _format_items(items):
    """Yield formatted ``key: value`` fragments for (key, array) pairs.

    Scalars are rendered inline (``key: value``); arrays with one or more
    dimensions are rendered on their own lines (``key:`` then the array).
    Consecutive entries are separated by a single space, or by a newline
    whenever either neighbor is a non-scalar array.
    """
    formatter = lambda x: '%.3e' % x
    last_large_output = None
    for key, value in items:
        value = np.asarray(value)
        large_output = value.ndim >= 1
        # If there was a previous output, print a separator.
        if last_large_output is not None:
            yield '\n' if large_output or last_large_output else ' '
        format_string = '%s:\n%s' if large_output else '%s: %s'
        # NOTE: the original also passed style=formatter; ``style`` only
        # ever applied to 0-d arrays, is deprecated since numpy 1.14, and
        # is fully superseded by the ``formatter`` mapping used here.
        yield format_string % (key,
                               np.array2string(value,
                                               formatter={'float_kind': formatter}))
        last_large_output = large_output
def test_basic(self):
    """Basic test of array2string."""
    arr = np.arange(3)
    # Default formatting of a small integer array.
    assert_(np.array2string(arr) == '[0 1 2]')
    # A tiny line width must force the output to wrap.
    assert_(np.array2string(arr, max_line_width=4) == '[0 1\n 2]')
def test_style_keyword(self):
    """This should only apply to 0-D arrays. See #1218."""
    # ``style`` receives the scalar and returns the full rendering.
    styled = np.array2string(np.array(1.5),
                             style=lambda x: "Value in 0-D array: " + str(x))
    assert_(styled == 'Value in 0-D array: 1.5')
def test_format_function(self):
    """Test custom format function for each element in array."""
    def _format_function(x):
        # Map magnitude bands to single marker characters.
        if np.abs(x) < 1:
            return '.'
        if np.abs(x) < 2:
            return 'o'
        return 'O'

    x = np.arange(3)
    # hex()/oct() of numpy integers render differently on Python 2 vs 3.
    if sys.version_info[0] >= 3:
        x_hex = "[0x0 0x1 0x2]"
        x_oct = "[0o0 0o1 0o2]"
    else:
        x_hex = "[0x0L 0x1L 0x2L]"
        x_oct = "[0L 01L 02L]"

    assert_(np.array2string(x, formatter={'all': _format_function}) ==
            "[. o O]")
    assert_(np.array2string(x, formatter={'int_kind': _format_function}) ==
            "[. o O]")
    assert_(np.array2string(x, formatter={'all': lambda x: "%.4f" % x}) ==
            "[0.0000 1.0000 2.0000]")
    assert_equal(np.array2string(x, formatter={'int': lambda x: hex(x)}),
                 x_hex)
    assert_equal(np.array2string(x, formatter={'int': lambda x: oct(x)}),
                 x_oct)

    x = np.arange(3.)
    assert_(np.array2string(x, formatter={'float_kind': lambda x: "%.2f" % x}) ==
            "[0.00 1.00 2.00]")
    assert_(np.array2string(x, formatter={'float': lambda x: "%.2f" % x}) ==
            "[0.00 1.00 2.00]")

    s = np.array(['abc', 'def'])
    assert_(np.array2string(s, formatter={'numpystr': lambda s: s * 2}) ==
            '[abcabc defdef]')
def test_datetime_array_str(self):
    """str() and array2string() of datetime64 arrays."""
    dates = np.array(['2011-03-16', '1920-01-01', '2013-05-19'], dtype='M')
    assert_equal(str(dates), "['2011-03-16' '1920-01-01' '2013-05-19']")

    # A custom datetime formatter combined with a custom separator.
    stamps = np.array(['2011-03-16T13:55', '1920-01-01T03:12'], dtype='M')
    rendered = np.array2string(
        stamps, separator=', ',
        formatter={'datetime':
                   lambda x: "'%s'" % np.datetime_as_string(x, timezone='UTC')})
    assert_equal(rendered, "['2011-03-16T13:55Z', '1920-01-01T03:12Z']")

    # Check that one NaT doesn't corrupt subsequent entries
    with_nat = np.array(['2010', 'NaT', '2030']).astype('M')
    assert_equal(str(with_nat), "['2010' 'NaT' '2030']")
def arr2str(a):
    """Render array *a* as one comma-separated line without wrapping."""
    text = np.array2string(a,
                           max_line_width=np.inf,
                           separator=',',
                           precision=None,
                           suppress_small=None)
    # Defensive: drop any stray newlines from the rendering.
    return text.replace('\n', '')
def periodicDataDump(filename, d):
    """
    Dump an ndarray to disk, accumulating across calls.

    If *filename* does not exist yet, *d* is saved as-is; otherwise the
    previously stored array is loaded and *d* is concatenated onto it
    before re-saving.  Empty input is ignored.

    The original carried a permanently-True ``old`` flag plus a dead,
    commented-out text-file writer; only the live ``np.save`` path is kept.

    Parameters
    ----------
    filename : str
        Path of the ``.npy`` dump file.
    d : numpy.ndarray
        Rows to append; may be empty.
    """
    if len(d) == 0:
        return
    if os.path.exists(filename):
        previous = np.load(filename)
        np.save(filename, np.concatenate((previous, d)))
    else:
        np.save(filename, d)
def __repr__(self):
    """Render every stored matrix between dashed separator lines."""
    if len(self.data) < 1:
        return 'No matrix found'
    rule = '-----------------------------------------\n'
    parts = [rule]
    for entry in self._list:
        parts.append(np.array2string(np.asarray(entry)))
        parts.append('\n' + rule)
    # Remove trailing newline character
    return ''.join(parts).rstrip("\n")
def matrix_mismatch_string_builder(rec_mat, exp_mat):
    """Build a readable report contrasting expected vs. received matrices."""
    expected_mat_str = np.array2string(np.asarray(exp_mat))
    received_mat_str = np.array2string(np.asarray(rec_mat))
    divider = "\n----------------------\n"
    pieces = (divider,
              " Expected Output ",
              divider,
              expected_mat_str,
              divider,
              " Received Output ",
              divider,
              received_mat_str)
    return str(''.join(pieces))
def test_datetime_array_str(self):
    """str() and array2string() of datetime64 arrays (variant with 'Z' input)."""
    dates = np.array(['2011-03-16', '1920-01-01', '2013-05-19'], dtype='M')
    assert_equal(str(dates), "['2011-03-16' '1920-01-01' '2013-05-19']")

    # NOTE(review): the trailing 'Z' in these inputs is only accepted by
    # older numpy; modern numpy rejects timezone suffixes on parse.
    stamps = np.array(['2011-03-16T13:55Z', '1920-01-01T03:12Z'], dtype='M')
    rendered = np.array2string(
        stamps, separator=', ',
        formatter={'datetime':
                   lambda x: "'%s'" % np.datetime_as_string(x, timezone='UTC')})
    assert_equal(rendered, "['2011-03-16T13:55Z', '1920-01-01T03:12Z']")

    # Check that one NaT doesn't corrupt subsequent entries
    with_nat = np.array(['2010', 'NaT', '2030']).astype('M')
    assert_equal(str(with_nat), "['2010' 'NaT' '2030']")
def __str__(self):
    """One-line summary: state vector, (m|n) counters, optional prediction and MMSI."""
    fmt = {'float_kind': lambda x: "{: 7.1f}".format(x)}
    mmsi_part = " MMSI {:} ".format(self.mmsi) if self.mmsi is not None else ""
    if self.predicted_state is not None:
        pred_part = "Pred state:" + np.array2string(self.predicted_state,
                                                    precision=1,
                                                    suppress_small=True,
                                                    formatter=fmt)
    else:
        pred_part = ""
    state_part = np.array2string(self.state, precision=1,
                                 suppress_small=True, formatter=fmt)
    return ("State: " + state_part +
            " ({0:}|{1:}) ".format(self.m, self.n) +
            pred_part + mmsi_part)
def __str__(self):
    """Summarize measurement time, state vector, accuracy flag and MMSI."""
    time_part = self.getTimeString()
    mmsi_part = 'MMSI: ' + str(self.mmsi) if self.mmsi is not None else ""
    state_part = np.array2string(
        self.state, formatter={'float_kind': lambda x: '{: 7.1f}'.format(x)})
    return ('Time: ' + time_part + " " +
            'State:' + state_part + " " +
            'High accuracy: {:1} '.format(self.highAccuracy) +
            mmsi_part)
def main():
    """Minimize the 6-particle Sutton-Chen potential with simulated annealing."""
    particle_count = 6
    # One (x, y, z) bound triple per particle.
    search_bounds = [(-0.7, 0.7)] * (3 * particle_count)
    np.random.seed(1)
    result = sda(sutton_chen, None, bounds=search_bounds)
    # np.set_printoptions(precision=4)
    print('xmin =\n{}'.format(np.array2string(result.x, max_line_width=40)))
    print("global minimum: f(xmin) = {}".format(result.fun))
def check_mixed_mode(vis,mode):
    """Detect, and optionally split out, mixed-mode data in a measurement set.

    A measurement set is "mixed mode" when its spectral windows do not all
    share the same total bandwidth (i.e. continuum plus spectral-line
    windows coexist).

    Parameters
    ----------
    vis : str
        Path to the CASA measurement set.
    mode : str
        'check' -> only report mixed mode (returns True/False);
        'split' -> separate continuum and spectral-line data into new
        measurement sets (returns '').

    Returns
    -------
    bool or str
        True/False in 'check' mode, '' in 'split' mode.
    """
    logger.info('Check for mixed mode')
    # Read the per-spectral-window bandwidths from the MS subtable.
    tb.open(vis + '/SPECTRAL_WINDOW')
    bw_spw = np.array(tb.getcol('TOTAL_BANDWIDTH'))
    tb.close()
    if len(np.unique(bw_spw)) != 1:
        # More than one distinct bandwidth -> mixed continuum/line data.
        if mode == 'split':
            logger.info('Splitting continuum from spectral line')
            # Continuum windows are the ones with the widest bandwidth.
            cont_spw = np.where(bw_spw==np.max(np.unique(bw_spw)))[0]
            # array2string(...)[1:-1] turns e.g. [0 1] into the CASA
            # spw-selection string "0,1" (brackets stripped).
            print np.array2string(cont_spw, separator=',')[1:-1]
            split(vis=vis, outputvis=vis+'.continuum', spw=np.array2string(cont_spw, separator=',')[1:-1], datacolumn='data')
            spec_line = np.delete(bw_spw, cont_spw)
            logger.info('Splitting spectral line')
            # One output MS per distinct spectral-line bandwidth.
            for i in range(len(np.unique(spec_line))):
                spec_line_spw = np.where(bw_spw==np.unique(spec_line)[i])[0]
                split(vis=vis, outputvis=vis+'.sp{0}'.format(i), spw=np.array2string(spec_line_spw, separator=',')[1:-1],datacolumn='data')
                ms.writehistory(message='eMER_CASA_Pipeline: Spectral line split from {0}'.format(vis),msname=vis+'.sp{0}'.format(i))
            ms.writehistory(message='eMER_CASA_Pipeline: Spectral lines split from this ms',msname=vis)
            # Keep the original MS aside and continue the pipeline on the
            # continuum-only data.
            os.system('mv {0} {1}'.format(vis, vis+'.original'))
            os.system('mv {0} {1}'.format(vis+'.continuum', vis))
            logger.info('Will continue with continuum, original data is {0}'.format(vis+'.original'))
            return_variable = ''
        if mode == 'check':
            logger.info('MS is mixed mode. Please split')
            return_variable = True
    else:
        if mode == 'split':
            logger.info('Not mixed mode, continuing')
            return_variable = ''
        if mode == 'check':
            return_variable = False
    return return_variable
def getRefForMatrix(self, matrix):
    """Return a stable index for *matrix*, allocating a new slot on first use.

    The printed form of the matrix serves as the cache key, so two
    matrices that render identically share one slot.
    """
    cache_key = np.array2string(matrix)
    if cache_key in self.tdict:
        return self.tdict[cache_key]
    new_index = self.ntindex
    self.ntindex += 1
    # setup the empty texture array
    self.matrices[new_index] = matrix
    self.tdict[cache_key] = new_index
    # now fill in the values
    return new_index
def test_structure_format(self):
    """array2string() of structured arrays, including datetime and nested fields."""
    dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
    x = np.array([('Sarah', (8.0, 7.0)), ('John', (6.0, 7.0))], dtype=dt)
    assert_equal(np.array2string(x),
                 "[('Sarah', [ 8.,  7.]) ('John', [ 6.,  7.])]")

    # for issue #5692: NaT inside a structured datetime field
    A = np.zeros(shape=10, dtype=[("A", "M8[s]")])
    A[5:].fill(np.nan)
    assert_equal(np.array2string(A),
                 "[('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) " +
                 "('1970-01-01T00:00:00',)\n ('1970-01-01T00:00:00',) " +
                 "('1970-01-01T00:00:00',) ('NaT',) ('NaT',)\n " +
                 "('NaT',) ('NaT',) ('NaT',)]")

    # See #8160: sub-array fields
    struct_int = np.array([([1, -1],), ([123, 1],)], dtype=[('B', 'i4', 2)])
    assert_equal(np.array2string(struct_int),
                 "[([ 1, -1],) ([123,   1],)]")
    struct_2dint = np.array([([[0, 1], [2, 3]],), ([[12, 0], [0, 0]],)],
                            dtype=[('B', 'i4', (2, 2))])
    assert_equal(np.array2string(struct_2dint),
                 "[([[ 0,  1], [ 2,  3]],) ([[12,  0], [ 0,  0]],)]")

    # See #8172: structured scalar
    array_scalar = np.array((1., 2.1234567890123456789, 3.),
                            dtype=('f8,f8,f8'))
    assert_equal(np.array2string(array_scalar),
                 "( 1.        ,  2.12345679,  3.        )")
def np2flatstr(arr, fmt="% .6g"):
    """Render *arr* as a flat, space-separated string, one *fmt* per float."""
    float_formatter = {'float_kind': lambda x: fmt % x}
    return np.array2string(arr, prefix='', separator=' ',
                           formatter=float_formatter)
    # return ' '.join([fmt % (x) for x in np.asarray(X).flatten()])
def parse_list_str(ar, compres):
    """Build a compact source string for a list of items.

    compres == -1: unable to reduce in size -> literal '[a, b, ...]'
    compres ==  1: broadcastable array (all items identical) -> 'N * [a]'

    todo: for multiline array strings add extra space in line 2, 3..
    use array2string-prefix for this
    """
    # max_line_width = 80  # might set this to something different later
    # precision = 8
    # suppress_small = True  # to mask some rounding issues
    if compres == -1:
        string = '[' + ', '.join(item.string for item in ar) + ']'
    elif compres == 1:
        if len(ar) == 1:
            string = ar[0].string
        else:
            string = str(len(ar)) + ' * [' + ar[0].string + ']'
    return string
def main():
    """Generate ``wires_autogen.cpp`` from lookup tables and a C++ template.

    Reads the numpy tables in ``TABLE_FILE`` plus the module-level pin-name
    and inversion configuration, renders the pin-I/O snippets and the
    lookup-table initializer, and writes the filled-in template to disk.
    """
    num_inputs = len(input_names)
    num_outputs = len(output_names)
    tables = np.load(TABLE_FILE)
    # One input-reading line per output pin, plus the serial-number-dependent
    # switch inversion.
    pininput = "\n".join([template_pininput.format(name=name, bit=i) for i, name in enumerate(output_names)])
    pininput += "\n\tif(invert_switches(serial_number))\n\t\tinput_value ^= {};\n".format(2**num_outputs-1)
    # Pack each table row into a single integer (bit per output) and emit a
    # C array initializer per config, titled with a // comment.
    lookuptable = ",\n".join([
        "// {}\n{{ ".format(configs[i]["title"])+np.array2string(np.sum(np.power(2, np.arange(num_outputs))[None, ::-1] * tables[i, :, num_inputs:], axis=1), separator=", ")[1:-1]+"}"  # remove [] on the outside
        for i in range(len(configs))])
    # pinMode setup: inputs drive LEDs (OUTPUT), outputs read switches
    # (INPUT_PULLUP); RUN is an extra input pin.
    setup = "\n".join(["""\tpinMode(PIN_{name}, OUTPUT);""".format(name=name) for name in input_names])
    setup += "\n\n"
    setup += "\n".join(["""\tpinMode(PIN_{name}, INPUT_PULLUP);""".format(name=name) for name in output_names+["RUN"]])
    # LED display logic, including serial-number-dependent LED inversion.
    setdisplay = "\tif(invert_leds(serial_number))\n\t\trandom_value ^= 0xFF;\n\n"
    setdisplay += "\n".join(["""\tdigitalWrite(PIN_{name}, random_value & (1<<{bit}) ? HIGH : LOW);""".format(name=name, bit=i) for i, name in enumerate(input_names)])
    # Serial-number pattern checks deciding whether LEDs/switches invert.
    invert_leds_string = "\n".join(("""\tif(serial_number[4]=='{}') return 1;""".format(l) for l in led_inverts))
    invert_switches_string = "\n".join(("""\tif( (serial_number[2]=='{}') && (serial_number[3]=='{}') ) return 1;""".format(s[0], s[1]) for s in switch_inverts))
    random_value_bitmask = 2**num_inputs-1
    with open("autowires.cpp.in", "r") as f:
        template = f.read()
    with open("wires_autogen.cpp","w") as f:
        f.write("// This file has been generated automatically! Do not modify!\n")
        f.write(template.format(
            lookuptable=lookuptable,
            pininput=pininput,
            setup=setup,
            setdisplay=setdisplay,
            invert_switches = invert_switches_string,
            invert_leds = invert_leds_string,
            random_value_bitmask = random_value_bitmask,
            num_tables=len(tables),
            num_combinations=len(tables[0])))
def key(self, master=False):
    """Serialize ``self.pos`` into a canonical, bracket-free key string."""
    rendered = np.array2string(
        self.pos, separator=',', prefix=')',
        formatter={'float_kind': lambda x: Point._FORMAT_FLOAT % x})
    return remove_bracket(rendered)
    # this is an alternative constructor which can be called directly as "Point.fromkey(keystr)"
def main(dataSet):
    '''
    Pickle one of the bundled sklearn datasets (data and target) to disk.

    Parameters
    ----------
    dataSet : int
        Key into the supported loaders: 1=iris, 2=boston, 3=digits,
        4=diabetes.

    Returns
    -------
    list or None
        [data_path, target_path, features] on success (features is always
        None); on failure the error is printed and nothing is returned.
    '''
    directory = "C:/ant/datasets/"
    if not os.path.exists(directory):
        os.makedirs(directory)
    allDataSets = {1: "load_iris", 2: "load_boston", 3: "load_digits", 4: "load_diabetes"}
    try:
        # Look the loader up by name instead of exec()-ing generated code:
        # exec cannot rebind function locals in Python 3 and is unsafe.
        from sklearn import datasets as sk_datasets
        loader = getattr(sk_datasets, allDataSets[dataSet])
        loaded = loader()
        data = directory + allDataSets[dataSet] + "_Data.txt"
        target = directory + allDataSets[dataSet] + "_Target.txt"
        # pickle.dumps produces bytes, so the files must be opened in
        # binary mode (the original wrote bytes to text-mode handles).
        with open(data, 'wb') as f:
            f.write(pickle.dumps(loaded.data))
        with open(target, 'wb') as f2:
            f2.write(pickle.dumps(loaded.target))
        # np.array2string(loaded.data, separator=",")
        features = None
        return [data, target, features]
    except Exception as e:
        print(str(e))
def writeRDF(buffer):
    # Consume (folder, subfolder) work items from a queue and emit one RDF
    # file per image plus one per descriptor found under descriptor_path.
    # NOTE(review): relies on the module globals descriptor_path, output_path,
    # images, descriptors, prefixes_im, prefixes_desc, descriptor_map and
    # error_log being defined elsewhere in the file.
    while not buffer.empty():
        folder, subfolder = buffer.get()
        print "working on %s" % subfolder
        img_dirs = []
        # os.walk + break: collect only the immediate child directories.
        for path, dirs, files in os.walk(os.path.join(descriptor_path,folder,subfolder)):
            img_dirs.extend(dirs)
            break
        for img_dir in img_dirs:
            out_images = os.path.join(output_path, images, folder, subfolder)
            out_descriptors = os.path.join(output_path, descriptors, folder, subfolder)
            if not os.path.exists(out_images):
                os.makedirs(out_images)
            if not os.path.exists(out_descriptors):
                os.makedirs(out_descriptors)
            #write rdf file for visual entity
            image_rdf = open(os.path.join(out_images, img_dir), "w")
            image_rdf.write(prefixes_im)
            txt = "imr:%s a imo:Image ;\n" % img_dir
            txt += "\timo:folder %s ;\n" % folder
            txt += "\timo:subfolder %s ;\n" % subfolder
            txt += "\towl:sameAs dbcr:%s ;\n" % img_dir
            image_rdf.write(txt)
            image_rdf.close()
            #write rdf files for each descriptor
            for path, dirs, files in os.walk(os.path.join(descriptor_path, folder, subfolder, img_dir)):
                # Fewer than 3 descriptor files is logged as an error but
                # processing still continues.
                if len(files) < 3:
                    e = open(error_log, "a")
                    txt = "File %s/%s/%s has only %d descriptors\n" % (folder, subfolder, img_dir, len(files))
                    e.write(txt)
                    e.close()
                for descriptor_file in files:
                    descriptor = np.load(os.path.join(descriptor_path, folder, subfolder,img_dir,descriptor_file))
                    descriptor_rdf = open(os.path.join(out_descriptors, descriptor_file), "w")
                    descriptor_rdf.write(prefixes_desc)
                    # The last 3 chars of the filename select the descriptor
                    # type; descriptor_map translates them to an RDF class.
                    extension = descriptor_map[descriptor_file[-3:]]
                    txt = "\nimr:%s a imo:%s ;\n" % (descriptor_file[:-3] + extension, extension)
                    txt += "\timo:describes imr:%s ;\n" % (img_dir)
                    # Single-line rendering of the first descriptor column.
                    txt += "\timo:value \"%s\" ." % (np.array2string(descriptor.T[0], separator=',', max_line_width=100000))
                    descriptor_rdf.write(txt)
                    descriptor_rdf.close()
                break
        buffer.task_done()
        print "subfolder %s done" % subfolder
def print_hinton(arr, max_arr=None):
    '''Render a Hinton-style diagram of *arr* as a string of bar characters.

    Each element maps to one character from the module-level ``_chars``
    palette according to its magnitude relative to the largest absolute
    value in ``max_arr`` (or in ``arr`` itself when ``max_arr`` is None);
    negative values are wrapped in dim ANSI color codes.  (The example
    output in the original docstring used block-drawing characters that
    were lost to an encoding error and is omitted here.)

    Parameters
    ----------
    arr : array_like
        Values to visualize; 1-D input is treated as a single row.
    max_arr : array_like, optional
        Array whose extreme absolute value sets the normalization scale.

    Returns
    -------
    str
        One line of bar characters per row of ``arr``, without a trailing
        newline.
    '''
    arr = np.asarray(arr)
    if len(arr.shape) == 1:
        arr = arr[None, :]

    def visual_func(val, max_val):
        # Map |val| / max_val to an index into the palette; the exact
        # maximum must be clamped to the last character explicitly.
        if abs(val) == max_val:
            step = len(_chars) - 1
        else:
            step = int(abs(float(val) / max_val) * len(_chars))
        colourstart = ""
        colourend = ""
        if val < 0:
            # Dim gray for negative values, reset color afterwards.
            colourstart, colourend = '\033[90m', '\033[0m'
        return colourstart + _chars[step] + colourend

    if max_arr is None:
        max_arr = arr
    # NOTE(review): max_val is 0 for an all-zero input, which would divide
    # by zero inside visual_func — confirm callers never pass all zeros.
    max_val = max(abs(np.max(max_arr)), abs(np.min(max_arr)))
    # print(np.array2string(arr,
    #     formatter={'float_kind': lambda x: visual(x, max_val)},
    #     max_line_width=5000))
    f = np.vectorize(visual_func)
    result = f(arr, max_val)  # array of single-character strings
    rval = ''
    for r in result:
        rval += ''.join(r) + '\n'
    return rval[:-1]
def parse_array_str(ar, orig_shape, compres, precision=8):
    """Build a compact numpy source string for array *ar*.

    Parameters
    ----------
    ar : array_like
        The (already reduced) array to serialize.
    orig_shape : tuple
        Original shape, used for the broadcast form.
    compres : int
        -1: unable to reduce in size -> 'np.array([...])'
         0: a value in a singleton dimension may be used -> plain str()
         1: broadcastable array -> 'np.broadcast_to([...], shape)'
    precision : int, optional
        Digits passed to np.array2string.

    todo: for multiline array strings add extra space in line 2, 3..
    use array2string-prefix for this
    """
    max_line_width = np.inf
    suppress_small = True  # to mask some rounding issues
    # Scope the print options to this call instead of mutating the global
    # state (the original leaked np.set_printoptions for the whole
    # process).  threshold=inf keeps large arrays untruncated in every
    # branch, linewidth=inf keeps str(ar) on one logical line.
    with np.printoptions(threshold=np.inf, linewidth=np.inf):
        if compres == 0:
            return str(ar)
        elif compres == -1:
            pre = 'np.array('
            post = ')'
            ar_str = np.array2string(np.array(ar),
                                     max_line_width=max_line_width,
                                     precision=precision,
                                     suppress_small=suppress_small,
                                     separator=',', prefix=pre)
            # Collapse all internal whitespace/newlines to single spaces.
            ar_str = ' '.join(ar_str.split())
            return pre + ar_str + post
        elif compres == 1:
            pre = 'np.broadcast_to('
            post = ', {0})'.format(orig_shape)
            ar_str = np.array2string(ar,
                                     max_line_width=max_line_width,
                                     precision=precision,
                                     suppress_small=suppress_small,
                                     separator=',', prefix=pre)
            ar_str = ' '.join(ar_str.split())
            return pre + ar_str + post