The following 50 code examples, extracted from open source Python projects, illustrate how to use numpy.float64().
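Before the full examples, here is a minimal sketch of the patterns that recur below: numpy.float64 used as an array dtype, as a scalar/array constructor, and as a cast target. Variable names are illustrative only.

import numpy as np

# numpy.float64 as an array dtype
a = np.zeros(4, dtype=np.float64)

# numpy.float64 as a scalar / array constructor
x = np.float64(1e-5)
b = np.float64([0.1, 0.2, 0.3])

# casting an existing array to double precision
c = np.arange(5).astype(np.float64)
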
def test_FFT2(FFT2):
    N = FFT2.N
    if FFT2.rank == 0:
        A = random(N).astype(FFT2.float)
    else:
        A = zeros(N, dtype=FFT2.float)

    atol, rtol = (1e-10, 1e-8) if FFT2.float is float64 else (5e-7, 1e-4)
    FFT2.comm.Bcast(A, root=0)
    a = zeros(FFT2.real_shape(), dtype=FFT2.float)
    c = zeros(FFT2.complex_shape(), dtype=FFT2.complex)
    a[:] = A[FFT2.real_local_slice()]
    c = FFT2.fft2(a, c)
    B2 = zeros(FFT2.global_complex_shape(), dtype=FFT2.complex)
    B2 = rfft2(A, B2, axes=(0, 1))
    assert allclose(c, B2[FFT2.complex_local_slice()], rtol, atol)
    a = FFT2.ifft2(c, a)
    assert allclose(a, A[FFT2.real_local_slice()], rtol, atol)

def ai_lowpass_cutoff_freq_range_vals(self):
    """
    List[float]: Indicates pairs of lowpass cutoff frequency ranges
        supported by this device. Each pair consists of the low
        value, followed by the high value. If the device supports a
        set of discrete lowpass cutoff frequencies, use
        **ai_lowpass_cutoff_freq_discrete_vals** to determine the
        supported frequencies.
    """
    cfunc = lib_importer.windll.DAQmxGetDevAILowpassCutoffFreqRangeVals
    if cfunc.argtypes is None:
        with cfunc.arglock:
            if cfunc.argtypes is None:
                cfunc.argtypes = [
                    ctypes_byte_str,
                    wrapped_ndpointer(dtype=numpy.float64, flags=('C','W')),
                    ctypes.c_uint]

    temp_size = 0
    while True:
        val = numpy.zeros(temp_size, dtype=numpy.float64)

        size_or_code = cfunc(
            self._name, val, temp_size)

        if is_array_buffer_too_small(size_or_code):
            # Buffer size must have changed between calls; check again.
            temp_size = 0
        elif size_or_code > 0 and temp_size == 0:
            # Buffer size obtained, use to retrieve data.
            temp_size = size_or_code
        else:
            break

    check_for_error(size_or_code)

    return val.tolist()

def _write_analog_f_64(
        task_handle, write_array, num_samps_per_chan, auto_start, timeout,
        data_layout=FillMode.GROUP_BY_CHANNEL):
    samps_per_chan_written = ctypes.c_int()

    cfunc = lib_importer.windll.DAQmxWriteAnalogF64
    if cfunc.argtypes is None:
        with cfunc.arglock:
            if cfunc.argtypes is None:
                cfunc.argtypes = [
                    lib_importer.task_handle, ctypes.c_int, c_bool32,
                    ctypes.c_double, ctypes.c_int,
                    wrapped_ndpointer(dtype=numpy.float64, flags=('C', 'W')),
                    ctypes.POINTER(ctypes.c_int), ctypes.POINTER(c_bool32)]

    error_code = cfunc(
        task_handle, num_samps_per_chan, auto_start, timeout,
        data_layout.value, write_array,
        ctypes.byref(samps_per_chan_written), None)
    check_for_error(error_code)

    return samps_per_chan_written.value

def _write_ctr_freq(
        task_handle, freq, duty_cycle, num_samps_per_chan, auto_start,
        timeout, data_layout=FillMode.GROUP_BY_CHANNEL):
    num_samps_per_chan_written = ctypes.c_int()

    cfunc = lib_importer.windll.DAQmxWriteCtrFreq
    if cfunc.argtypes is None:
        with cfunc.arglock:
            if cfunc.argtypes is None:
                cfunc.argtypes = [
                    lib_importer.task_handle, ctypes.c_int, c_bool32,
                    ctypes.c_double, ctypes.c_int,
                    wrapped_ndpointer(dtype=numpy.float64, flags=('C', 'W')),
                    wrapped_ndpointer(dtype=numpy.float64, flags=('C', 'W')),
                    ctypes.POINTER(ctypes.c_int), ctypes.POINTER(c_bool32)]

    error_code = cfunc(
        task_handle, num_samps_per_chan, auto_start, timeout,
        data_layout.value, freq, duty_cycle,
        ctypes.byref(num_samps_per_chan_written), None)
    check_for_error(error_code)

    return num_samps_per_chan_written.value

def _read_analog_f_64(
        task_handle, read_array, num_samps_per_chan, timeout,
        fill_mode=FillMode.GROUP_BY_CHANNEL):
    samps_per_chan_read = ctypes.c_int()

    cfunc = lib_importer.windll.DAQmxReadAnalogF64
    if cfunc.argtypes is None:
        with cfunc.arglock:
            if cfunc.argtypes is None:
                cfunc.argtypes = [
                    lib_importer.task_handle, ctypes.c_int, ctypes.c_double,
                    c_bool32,
                    wrapped_ndpointer(dtype=numpy.float64, flags=('C', 'W')),
                    ctypes.c_uint, ctypes.POINTER(ctypes.c_int),
                    ctypes.POINTER(c_bool32)]

    error_code = cfunc(
        task_handle, num_samps_per_chan, timeout, fill_mode.value,
        read_array, numpy.prod(read_array.shape),
        ctypes.byref(samps_per_chan_read), None)
    check_for_error(error_code)

    return samps_per_chan_read.value

def _read_counter_f_64(task_handle, read_array, num_samps_per_chan, timeout):
    samps_per_chan_read = ctypes.c_int()

    cfunc = lib_importer.windll.DAQmxReadCounterF64
    if cfunc.argtypes is None:
        with cfunc.arglock:
            if cfunc.argtypes is None:
                cfunc.argtypes = [
                    lib_importer.task_handle, ctypes.c_int, ctypes.c_double,
                    wrapped_ndpointer(dtype=numpy.float64, flags=('C', 'W')),
                    ctypes.c_uint, ctypes.POINTER(ctypes.c_int),
                    ctypes.POINTER(c_bool32)]

    error_code = cfunc(
        task_handle, num_samps_per_chan, timeout, read_array,
        numpy.prod(read_array.shape), ctypes.byref(samps_per_chan_read), None)
    check_for_error(error_code)

    return samps_per_chan_read.value

def _read_counter_f_64_ex(
        task_handle, read_array, num_samps_per_chan, timeout,
        fill_mode=FillMode.GROUP_BY_CHANNEL):
    samps_per_chan_read = ctypes.c_int()

    cfunc = lib_importer.windll.DAQmxReadCounterF64Ex
    if cfunc.argtypes is None:
        with cfunc.arglock:
            if cfunc.argtypes is None:
                cfunc.argtypes = [
                    lib_importer.task_handle, ctypes.c_int, ctypes.c_double,
                    ctypes.c_int,
                    wrapped_ndpointer(dtype=numpy.float64, flags=('C', 'W')),
                    ctypes.c_uint, ctypes.POINTER(ctypes.c_int),
                    ctypes.POINTER(c_bool32)]

    error_code = cfunc(
        task_handle, num_samps_per_chan, timeout, fill_mode.value,
        read_array, numpy.prod(read_array.shape),
        ctypes.byref(samps_per_chan_read), None)
    check_for_error(error_code)

    return samps_per_chan_read.value

def _read_ctr_freq(
        task_handle, freq, duty_cycle, num_samps_per_chan, timeout,
        interleaved=FillMode.GROUP_BY_CHANNEL):
    samps_per_chan_read = ctypes.c_int()

    cfunc = lib_importer.windll.DAQmxReadCtrFreq
    if cfunc.argtypes is None:
        with cfunc.arglock:
            if cfunc.argtypes is None:
                cfunc.argtypes = [
                    lib_importer.task_handle, ctypes.c_int, ctypes.c_double,
                    ctypes.c_int,
                    wrapped_ndpointer(dtype=numpy.float64, flags=('C', 'W')),
                    wrapped_ndpointer(dtype=numpy.float64, flags=('C', 'W')),
                    ctypes.c_uint, ctypes.POINTER(ctypes.c_int),
                    ctypes.POINTER(c_bool32)]

    error_code = cfunc(
        task_handle, num_samps_per_chan, timeout, interleaved.value,
        freq, duty_cycle, numpy.prod(freq.shape),
        ctypes.byref(samps_per_chan_read), None)
    check_for_error(error_code)

    return samps_per_chan_read.value

def _read_ctr_time(
        task_handle, high_time, low_time, num_samps_per_chan, timeout,
        interleaved=FillMode.GROUP_BY_CHANNEL):
    samps_per_chan_read = ctypes.c_int()

    cfunc = lib_importer.windll.DAQmxReadCtrTime
    if cfunc.argtypes is None:
        with cfunc.arglock:
            if cfunc.argtypes is None:
                cfunc.argtypes = [
                    lib_importer.task_handle, ctypes.c_int, ctypes.c_double,
                    ctypes.c_int,
                    wrapped_ndpointer(dtype=numpy.float64, flags=('C', 'W')),
                    wrapped_ndpointer(dtype=numpy.float64, flags=('C', 'W')),
                    ctypes.c_uint, ctypes.POINTER(ctypes.c_int),
                    ctypes.POINTER(c_bool32)]

    error_code = cfunc(
        task_handle, num_samps_per_chan, timeout, interleaved.value,
        high_time, low_time, numpy.prod(high_time.shape),
        ctypes.byref(samps_per_chan_read), None)
    check_for_error(error_code)

    return samps_per_chan_read.value

def test_insufficient_numpy_write_data(self, x_series_device, seed):
    # Reset the pseudorandom number generator with seed.
    random.seed(seed)

    # Randomly select physical channels to test.
    number_of_channels = random.randint(
        2, len(x_series_device.ao_physical_chans))
    channels_to_test = random.sample(
        x_series_device.ao_physical_chans, number_of_channels)

    with nidaqmx.Task() as task:
        task.ao_channels.add_ao_voltage_chan(
            flatten_channel_string([c.name for c in channels_to_test]),
            max_val=10, min_val=-10)

        number_of_samples = random.randint(1, number_of_channels - 1)
        values_to_test = numpy.float64([
            random.uniform(-10, 10) for _ in range(number_of_samples)])

        with pytest.raises(DaqError) as e:
            task.write(values_to_test, auto_start=True)
        assert e.value.error_code == -200524

def transform(self, img, lbl):
    img = img[:, :, ::-1]
    img = img.astype(np.float64)
    img -= self.mean
    img = m.imresize(img, (self.img_size[0], self.img_size[1]))
    # Resize scales images from 0 to 255, thus we need
    # to divide by 255.0
    img = img.astype(float) / 255.0
    # NHWC -> NCHW
    img = img.transpose(2, 0, 1)

    lbl[lbl == 255] = 0
    lbl = lbl.astype(float)
    lbl = m.imresize(lbl, (self.img_size[0], self.img_size[1]), 'nearest', mode='F')
    lbl = lbl.astype(int)

    img = torch.from_numpy(img).float()
    lbl = torch.from_numpy(lbl).long()
    return img, lbl

def transform(self, img, lbl):
    img = img[:, :, ::-1]
    img = img.astype(np.float64)
    img -= self.mean
    img = m.imresize(img, (self.img_size[0], self.img_size[1]))
    # Resize scales images from 0 to 255, thus we need
    # to divide by 255.0
    img = img.astype(float) / 255.0
    # NHWC -> NCHW
    img = img.transpose(2, 0, 1)

    lbl = self.encode_segmap(lbl)
    classes = np.unique(lbl)
    lbl = lbl.astype(float)
    lbl = m.imresize(lbl, (self.img_size[0], self.img_size[1]), 'nearest', mode='F')
    lbl = lbl.astype(int)
    assert(np.all(classes == np.unique(lbl)))

    img = torch.from_numpy(img).float()
    lbl = torch.from_numpy(lbl).long()
    return img, lbl

def get_3d_data_slices(slices):  # get data in Hounsfield units
    slices.sort(key=lambda x: float(x.ImagePositionPatient[2]))  # from v 9

    image = np.stack([s.pixel_array for s in slices])
    image = image.astype(np.int16)  # ensure int16 (it may be uint16 for some images)
    image[image == -2000] = 0  # correcting cylindrical bound entries to 0

    # Convert to Hounsfield units (HU)
    # The intercept is usually -1024
    for slice_number in range(len(slices)):  # from v 8
        intercept = slices[slice_number].RescaleIntercept
        slope = slices[slice_number].RescaleSlope
        if slope != 1:  # added 16 Jan 2016, evening
            image[slice_number] = slope * image[slice_number].astype(np.float64)
            image[slice_number] = image[slice_number].astype(np.int16)
        image[slice_number] += np.int16(intercept)

    return np.array(image, dtype=np.int16)

def get_pixels_hu(slices):
    image = np.stack([s.pixel_array for s in slices])
    image = image.astype(np.int16)

    # Set outside-of-scan pixels to 0
    # The intercept is usually -1024, so air is approximately 0
    image[image == -2000] = 0

    # Convert to Hounsfield units (HU)
    ### slope can differ per slice -- so do it individually
    ### (case in point black_tset, slices 95 vs 96)
    ### Changes/correction - 31.01.2017
    for slice_number in range(len(slices)):
        intercept = slices[slice_number].RescaleIntercept
        slope = slices[slice_number].RescaleSlope
        if slope != 1:
            image[slice_number] = slope * image[slice_number].astype(np.float64)
            image[slice_number] = image[slice_number].astype(np.int16)
        image[slice_number] += np.int16(intercept)

    return np.array(image, dtype=np.int16)

def get_3d_data_slices(slices):  # get data in Hounsfield units
    #slices = [dicom.read_file(path + '/' + s) for s in os.listdir(path)]
    #slices.sort(key=lambda x: int(x.InstanceNumber))  # was x.InstanceNumber
    slices.sort(key=lambda x: int(x.ImagePositionPatient[2]))  # from v 8

    image = np.stack([s.pixel_array for s in slices])
    image = image.astype(np.int16)  # ensure int16 (it may be uint16 for some images)
    image[image == -2000] = 0  # correcting cylindrical bound entries to 0

    # Convert to Hounsfield units (HU)
    # The intercept is usually -1024
    for slice_number in range(len(slices)):  # from v 8
        intercept = slices[slice_number].RescaleIntercept
        slope = slices[slice_number].RescaleSlope
        if slope != 1:  # added 16 Jan 2016, evening
            image[slice_number] = slope * image[slice_number].astype(np.float64)
            image[slice_number] = image[slice_number].astype(np.int16)
        image[slice_number] += np.int16(intercept)

    return np.array(image, dtype=np.int16)

def get_3d_data_hu(path):  # get data in Hounsfield units
    slices = [dicom.read_file(path + '/' + s) for s in os.listdir(path)]
    #slices.sort(key=lambda x: int(x.InstanceNumber))  # was x.InstanceNumber
    #slices.sort(key=lambda x: int(x.ImagePositionPatient[2]))  # from v8 - BUGGY
    slices.sort(key=lambda x: float(x.ImagePositionPatient[2]))  # from 22.02

    image = np.stack([s.pixel_array for s in slices])
    image = image.astype(np.int16)  # ensure int16 (it may be uint16 for some images)
    image[image == -2000] = 0  # correcting cylindrical bound entries to 0

    # Convert to Hounsfield units (HU)
    # The intercept is usually -1024
    for slice_number in range(len(slices)):  # from v 8
        intercept = slices[slice_number].RescaleIntercept
        slope = slices[slice_number].RescaleSlope
        if slope != 1:  # added 16 Jan 2016, evening
            image[slice_number] = slope * image[slice_number].astype(np.float64)
            image[slice_number] = image[slice_number].astype(np.int16)
        image[slice_number] += np.int16(intercept)

    return np.array(image, dtype=np.int16)

def _get_dtype_maps():
    """ Get dictionaries to map numpy data types to ITK types and the
    other way around.
    """
    # Define pairs
    tmp = [(np.float32, 'MET_FLOAT'), (np.float64, 'MET_DOUBLE'),
           (np.uint8, 'MET_UCHAR'), (np.int8, 'MET_CHAR'),
           (np.uint16, 'MET_USHORT'), (np.int16, 'MET_SHORT'),
           (np.uint32, 'MET_UINT'), (np.int32, 'MET_INT'),
           (np.uint64, 'MET_ULONG'), (np.int64, 'MET_LONG')]

    # Create dictionaries
    map1, map2 = {}, {}
    for np_type, itk_type in tmp:
        map1[np_type.__name__] = itk_type
        map2[itk_type] = np_type.__name__

    # Done
    return map1, map2

def __linear_quantize(data, q_levels):
    """
    floats in (0, 1) to ints in [0, q_levels-1]
    scales normalized across axis 1
    """
    # Normalization is on mini-batch not whole file
    #eps = numpy.float64(1e-5)
    #data -= data.min(axis=1)[:, None]
    #data *= ((q_levels - eps) / data.max(axis=1)[:, None])
    #data += eps/2
    #data = data.astype('int32')

    eps = numpy.float64(1e-5)
    data *= (q_levels - eps)
    data += eps/2
    data = data.astype('int32')
    return data

def __batch_quantize(data, q_levels, q_type):
    """
    One of 'linear', 'a-law', 'mu-law' for q_type.
    """
    data = data.astype('float64')
    data = __normalize(data)
    if q_type == 'linear':
        return __linear_quantize(data, q_levels)
    if q_type == 'a-law':
        return __a_law_quantize(data)
    if q_type == 'mu-law':
        # from [0, 1] to [-1, 1]
        data = 2.*data-1.
        # Automatically quantized to 256 bins.
        return __mu_law_quantize(data)
    raise NotImplementedError

def iter_tango_logs(directory, logs, topics=[]):
    for log in logs:
        directory = os.path.expanduser(os.path.join(args.directory, log))
        print('Accessing Tango directory {:}'.format(directory))
        dataset = TangoLogReader(directory=directory, scale=im_scale)
        for item in dataset.iterframes(topics=topics):
            bboxes = item.bboxes
            targets = item.coords

    # # If RGB_VIO, RGB, RGB_VIO in stream, then interpolate pose
    # # b/w the 1st and 3rd timestamps to match RGB timestamps
    # if len(self.__item_q) >= 3 and \
    #    self.__item_q[-1][0] == self.__item_q[-3][0] == 1 and \
    #    self.__item_q[-2][0] == 0:
    #     t1, t2, t3 = self.__item_q[-3][1], self.__item_q[-2][1], self.__item_q[-1][1]
    #     w2, w1 = np.float32([t2-t1, t3-t2]) / (t3-t1)
    #     p1, p3 = self.__item_q[-3][2], self.__item_q[-1][2]
    #     p2 = p1.interpolate(p3, w1)
    #     self.on_frame(t2, t2, p2, self.__item_q[-2][2])
    #     print np.array_str(np.float64([t1, t2, t3]) * 1e-14, precision=6, suppress_small=True), \
    #         (t2-t1) * 1e-6, (t3-t2) * 1e-6, w1, w2, p2

def load_ply(fn, version):
    """ Retrieve aligned point cloud for each scene """
    if version == 'v1':
        raise ValueError('''Version %s not supported. '''
                         '''Check dataset and choose either v1 or v2 scene dataset''' % version)
        # P = np.loadtxt(os.path.expanduser(fn), usecols=(2,3,4,5,6,7,8), dtype=np.float64)
        # return map(lambda p: RigidTransform(Quaternion.from_wxyz(p[:4]), p[4:]), P)
    elif version == 'v2':
        ply = PlyData.read(os.path.expanduser(fn))
        xyz = np.vstack([ply['vertex'].data['x'],
                         ply['vertex'].data['y'],
                         ply['vertex'].data['z']]).T
        rgb = np.vstack([ply['vertex'].data['diffuse_red'],
                         ply['vertex'].data['diffuse_green'],
                         ply['vertex'].data['diffuse_blue']]).T
        return xyz, rgb
    else:
        raise ValueError('''Version %s not supported. '''
                         '''Check dataset and choose either v1 or v2 scene dataset''' % version)

def quaternion_matrix(quaternion):
    """Return homogeneous rotation matrix from quaternion.

    >>> R = quaternion_matrix([0.06146124, 0, 0, 0.99810947])
    >>> numpy.allclose(R, rotation_matrix(0.123, (1, 0, 0)))
    True
    """
    q = numpy.array(quaternion[:4], dtype=numpy.float64, copy=True)
    nq = numpy.dot(q, q)
    if nq < _EPS:
        return numpy.identity(4)
    q *= math.sqrt(2.0 / nq)
    q = numpy.outer(q, q)
    return numpy.array((
        (1.0-q[1, 1]-q[2, 2],     q[0, 1]-q[2, 3],     q[0, 2]+q[1, 3], 0.0),
        (    q[0, 1]+q[2, 3], 1.0-q[0, 0]-q[2, 2],     q[1, 2]-q[0, 3], 0.0),
        (    q[0, 2]-q[1, 3],     q[1, 2]+q[0, 3], 1.0-q[0, 0]-q[1, 1], 0.0),
        (                0.0,                 0.0,                 0.0, 1.0)
        ), dtype=numpy.float64)

def test_frame_dtype_error():
    nelem = 20
    df1 = gd.DataFrame()
    df1['bad'] = np.arange(nelem)
    df1['bad'] = np.arange(nelem, dtype=np.float64)

    df2 = gd.DataFrame()
    df2['bad'] = np.arange(nelem)
    df2['bad'] = np.arange(nelem, dtype=np.float32)

    ddf1 = dgd.from_pygdf(df1, npartitions=5)
    ddf2 = dgd.from_pygdf(df2, npartitions=5)

    combined = dgd.from_delayed(ddf1.to_delayed() + ddf2.to_delayed())

    with pytest.raises(ValueError) as raises:
        out = combined.compute()
        print("out")

    raises.match(r"^Metadata mismatch found in `from_delayed`.")
    raises.match(r"\s+\|\s+".join(['bad', 'float32', 'float64']))

def quaternion_matrix(quaternion):
    """Return homogeneous rotation matrix from quaternion.

    >>> M = quaternion_matrix([0.99810947, 0.06146124, 0, 0])
    >>> numpy.allclose(M, rotation_matrix(0.123, [1, 0, 0]))
    True
    >>> M = quaternion_matrix([1, 0, 0, 0])
    >>> numpy.allclose(M, numpy.identity(4))
    True
    >>> M = quaternion_matrix([0, 1, 0, 0])
    >>> numpy.allclose(M, numpy.diag([1, -1, -1, 1]))
    True
    """
    q = numpy.array(quaternion, dtype=numpy.float64, copy=True)
    n = numpy.dot(q, q)
    if n < _EPS:
        return numpy.identity(4)
    q *= math.sqrt(2.0 / n)
    q = numpy.outer(q, q)
    return numpy.array([
        [1.0-q[2, 2]-q[3, 3],     q[1, 2]-q[3, 0],     q[1, 3]+q[2, 0], 0.0],
        [    q[1, 2]+q[3, 0], 1.0-q[1, 1]-q[3, 3],     q[2, 3]-q[1, 0], 0.0],
        [    q[1, 3]-q[2, 0],     q[2, 3]+q[1, 0], 1.0-q[1, 1]-q[2, 2], 0.0],
        [                0.0,                 0.0,                 0.0, 1.0]])

def __init__(self, initial=None):
    """Initialize virtual trackball control.

    initial : quaternion or rotation matrix
    """
    self._axis = None
    self._axes = None
    self._radius = 1.0
    self._center = [0.0, 0.0]
    self._vdown = numpy.array([0.0, 0.0, 1.0])
    self._constrain = False
    if initial is None:
        self._qdown = numpy.array([1.0, 0.0, 0.0, 0.0])
    else:
        initial = numpy.array(initial, dtype=numpy.float64)
        if initial.shape == (4, 4):
            self._qdown = quaternion_from_matrix(initial)
        elif initial.shape == (4, ):
            initial /= vector_norm(initial)
            self._qdown = initial
        else:
            raise ValueError("initial not a quaternion or matrix")
    self._qnow = self._qpre = self._qdown

def test_vectorized_jaccard_sim():
    # The vectorized version of jaccard similarity is 20x faster, but it is
    # harder to understand. Compute it the simple way and compare to the
    # vectorized version
    def jaccard_sim(X, Y):
        assert len(X) == len(Y)
        a = np.sum((X == 1) & (Y == 1))
        d = np.sum((X == 0) & (Y == 0))
        return a / float(len(X) - d)

    def binary_sim(mat):
        n_rows = mat.shape[0]
        out = np.empty((n_rows, n_rows), dtype=np.float64)
        for i in range(n_rows):
            out[i][i] = 1.
            for j in range(0, i):
                out[i][j] = jaccard_sim(mat[i], mat[j])
                out[j][i] = out[i][j]
        return out

    # Simulate 200 queries with 100 shared page ids
    matrix = np.random.rand(200, 100) > 0.7
    simple = binary_sim(matrix)
    vectorized = mjolnir.norm_query._binary_sim(matrix)
    assert np.array_equal(simple, vectorized)

def sample(self, sess, chars, vocab, num, prime, temperature):
    state = self.cell.zero_state(1, tf.float32).eval()
    for char in prime[:-1]:
        x = np.zeros((1, 1))
        x[0, 0] = vocab[char]
        feed = {self.input_data: x, self.initial_state: state}
        [state] = sess.run([self.final_state], feed)

    def weighted_pick(a):
        a = a.astype(np.float64)
        a = a.clip(min=1e-20)
        a = np.log(a) / temperature
        a = np.exp(a) / (np.sum(np.exp(a)))
        return np.argmax(np.random.multinomial(1, a, 1))

    char = prime[-1]
    for n in range(num):
        x = np.zeros((1, 1))
        x[0, 0] = vocab[char]
        feed = {self.input_data: x, self.initial_state: state}
        [probs, state] = sess.run([self.probs, self.final_state], feed)
        p = probs[0]
        sample = weighted_pick(p)
        char = chars[sample]
        yield char

def connectToDB(dbName=None, userName=None, dbPassword=None, dbHost=None,
                dbPort=None, dbCursor=psycopg2.extras.DictCursor):
    '''
    Connect to a specified PostgreSQL DB and return connection and cursor objects.
    '''
    # Start DB connection
    try:
        connectionString = "dbname='" + dbName + "'"
        if userName != None and userName != '':
            connectionString += " user='" + userName + "'"
        if dbHost != None and dbHost != '':
            connectionString += " host='" + dbHost + "'"
        if dbPassword != None and dbPassword != '':
            connectionString += " password='" + dbPassword + "'"
        if dbPort != None:
            connectionString += " port='" + str(dbPort) + "'"
        connection = psycopg2.connect(connectionString)
        register_adapter(numpy.float64, addapt_numpy_float64)
        register_adapter(numpy.int64, addapt_numpy_int64)
    except:
        raise
    # if the connection succeeded get a cursor
    cursor = connection.cursor(cursor_factory=dbCursor)
    return connection, cursor

def get_face_mask(img, img_l):
    img = np.zeros(img.shape[:2], dtype=np.float64)

    for idx in OVERLAY_POINTS_IDX:
        cv2.fillConvexPoly(img, cv2.convexHull(img_l[idx]), color=1)

    img = np.array([img, img, img]).transpose((1, 2, 0))

    img = (cv2.GaussianBlur(img, (BLUR_AMOUNT, BLUR_AMOUNT), 0) > 0) * 1.0
    img = cv2.GaussianBlur(img, (BLUR_AMOUNT, BLUR_AMOUNT), 0)

    return img

def smooth_colors(src, dst, src_l):
    blur_amount = BLUR_FRACTION * np.linalg.norm(
        np.mean(src_l[LEFT_EYE_IDX], axis=0) - np.mean(src_l[RIGHT_EYE_IDX], axis=0))
    blur_amount = int(blur_amount)

    if blur_amount % 2 == 0:
        blur_amount += 1

    src_blur = cv2.GaussianBlur(src, (blur_amount, blur_amount), 0)
    dst_blur = cv2.GaussianBlur(dst, (blur_amount, blur_amount), 0)

    dst_blur += (128 * (dst_blur <= 1.0)).astype(dst_blur.dtype)

    return (np.float64(dst) * np.float64(src_blur) / np.float64(dst_blur))

def get_tm_opp(pts1, pts2):
    # Transformation matrix - ( Translation + Scaling + Rotation )
    # using Procrustes analysis
    pts1 = np.float64(pts1)
    pts2 = np.float64(pts2)

    m1 = np.mean(pts1, axis=0)
    m2 = np.mean(pts2, axis=0)

    # Removing translation
    pts1 -= m1
    pts2 -= m2

    std1 = np.std(pts1)
    std2 = np.std(pts2)
    std_r = std2 / std1

    # Removing scaling
    pts1 /= std1
    pts2 /= std2

    U, S, V = np.linalg.svd(np.transpose(pts1) * pts2)

    # Finding the rotation matrix
    R = np.transpose(U * V)

    return np.vstack([np.hstack((std_r * R, np.transpose(m2) - std_r * R * np.transpose(m1))),
                      np.matrix([0.0, 0.0, 1.0])])

def sample(self, probs, temperature):
    if temperature == 0:
        return np.argmax(probs)

    probs = probs.astype(np.float64)  # convert to float64 for higher precision
    probs = np.log(probs) / temperature
    probs = np.exp(probs) / math.fsum(np.exp(probs))
    return np.argmax(np.random.multinomial(1, probs, 1))

# generate a sentence given conv_hidden

def test_FFT(FFT):
    N = FFT.N
    if FFT.rank == 0:
        A = random(N).astype(FFT.float)
        if FFT.communication == 'AlltoallN':
            C = empty(FFT.global_complex_shape(), dtype=FFT.complex)
            C = rfftn(A, C, axes=(0, 1, 2))
            C[:, :, -1] = 0  # Remove Nyquist frequency
            A = irfftn(C, A, axes=(0, 1, 2))
        B2 = zeros(FFT.global_complex_shape(), dtype=FFT.complex)
        B2 = rfftn(A, B2, axes=(0, 1, 2))
    else:
        A = zeros(N, dtype=FFT.float)
        B2 = zeros(FFT.global_complex_shape(), dtype=FFT.complex)

    atol, rtol = (1e-10, 1e-8) if FFT.float is float64 else (5e-7, 1e-4)
    FFT.comm.Bcast(A, root=0)
    FFT.comm.Bcast(B2, root=0)

    a = zeros(FFT.real_shape(), dtype=FFT.float)
    c = zeros(FFT.complex_shape(), dtype=FFT.complex)
    a[:] = A[FFT.real_local_slice()]
    c = FFT.fftn(a, c)
    #print abs((c - B2[FFT.complex_local_slice()])/c.max()).max()
    assert all(abs((c - B2[FFT.complex_local_slice()])/c.max()) < rtol)
    #assert allclose(c, B2[FFT.complex_local_slice()], rtol, atol)
    a = FFT.ifftn(c, a)
    #print abs((a - A[FFT.real_local_slice()])/a.max()).max()
    assert all(abs((a - A[FFT.real_local_slice()])/a.max()) < rtol)
    #assert allclose(a, A[FFT.real_local_slice()], rtol, atol)

def datatypes(precision):
    """Return datatypes associated with precision."""
    assert precision in ("single", "double")
    return {"single": (np.float32, np.complex64, MPI.C_FLOAT_COMPLEX),
            "double": (np.float64, np.complex128, MPI.C_DOUBLE_COMPLEX)}[precision]

def set_analog_power_up_states_with_output_type(
        self, power_up_states):
    """
    Updates power up states for analog physical channels.

    Args:
        power_up_states (List[nidaqmx.types.AOPowerUpState]):
            Contains the physical channels and power up states to
            set. Each element of the list contains a physical channel
            and the power up state to set for that physical channel.

            - physical_channel (str): Specifies the physical channel
              to modify.
            - power_up_state (float): Specifies the power up state
              to set for the physical channel specified with the
              **physical_channel** input.
            - channel_type (:class:`nidaqmx.constants.AOPowerUpOutputBehavior`):
              Specifies the output type for the physical channel
              specified with the **physical_channel** input.
    """
    physical_channel = flatten_channel_string(
        [p.physical_channel for p in power_up_states])
    state = numpy.float64(
        [p.power_up_state for p in power_up_states])
    channel_type = numpy.int32(
        [p.channel_type.value for p in power_up_states])

    cfunc = lib_importer.cdll.DAQmxSetAnalogPowerUpStatesWithOutputType
    if cfunc.argtypes is None:
        with cfunc.arglock:
            if cfunc.argtypes is None:
                cfunc.argtypes = [
                    ctypes_byte_str,
                    wrapped_ndpointer(dtype=numpy.float64, flags=('C','W')),
                    wrapped_ndpointer(dtype=numpy.int32, flags=('C','W'))]

    error_code = cfunc(
        physical_channel, state, channel_type, len(power_up_states))
    check_for_error(error_code)

def ao_power_amp_scaling_coeff(self):
    """
    List[float]: Indicates the coefficients of a polynomial equation
        used to scale from pre-amplified values.
    """
    cfunc = lib_importer.windll.DAQmxGetAOPowerAmpScalingCoeff
    if cfunc.argtypes is None:
        with cfunc.arglock:
            if cfunc.argtypes is None:
                cfunc.argtypes = [
                    ctypes_byte_str,
                    wrapped_ndpointer(dtype=numpy.float64, flags=('C','W')),
                    ctypes.c_uint]

    temp_size = 0
    while True:
        val = numpy.zeros(temp_size, dtype=numpy.float64)

        size_or_code = cfunc(
            self._name, val, temp_size)

        if is_array_buffer_too_small(size_or_code):
            # Buffer size must have changed between calls; check again.
            temp_size = 0
        elif size_or_code > 0 and temp_size == 0:
            # Buffer size obtained, use to retrieve data.
            temp_size = size_or_code
        else:
            break

    check_for_error(size_or_code)

    return val.tolist()

def ai_bridge_rngs(self):
    """
    List[float]: Indicates pairs of input voltage ratio ranges, in
        volts per volt, supported by devices that acquire using
        ratiometric measurements. Each pair consists of the low
        value followed by the high value.
    """
    cfunc = lib_importer.windll.DAQmxGetDevAIBridgeRngs
    if cfunc.argtypes is None:
        with cfunc.arglock:
            if cfunc.argtypes is None:
                cfunc.argtypes = [
                    ctypes_byte_str,
                    wrapped_ndpointer(dtype=numpy.float64, flags=('C','W')),
                    ctypes.c_uint]

    temp_size = 0
    while True:
        val = numpy.zeros(temp_size, dtype=numpy.float64)

        size_or_code = cfunc(
            self._name, val, temp_size)

        if is_array_buffer_too_small(size_or_code):
            # Buffer size must have changed between calls; check again.
            temp_size = 0
        elif size_or_code > 0 and temp_size == 0:
            # Buffer size obtained, use to retrieve data.
            temp_size = size_or_code
        else:
            break

    check_for_error(size_or_code)

    return val.tolist()

def ai_current_int_excit_discrete_vals(self):
    """
    List[float]: Indicates the set of discrete internal current
        excitation values supported by this device.
    """
    cfunc = lib_importer.windll.DAQmxGetDevAICurrentIntExcitDiscreteVals
    if cfunc.argtypes is None:
        with cfunc.arglock:
            if cfunc.argtypes is None:
                cfunc.argtypes = [
                    ctypes_byte_str,
                    wrapped_ndpointer(dtype=numpy.float64, flags=('C','W')),
                    ctypes.c_uint]

    temp_size = 0
    while True:
        val = numpy.zeros(temp_size, dtype=numpy.float64)

        size_or_code = cfunc(
            self._name, val, temp_size)

        if is_array_buffer_too_small(size_or_code):
            # Buffer size must have changed between calls; check again.
            temp_size = 0
        elif size_or_code > 0 and temp_size == 0:
            # Buffer size obtained, use to retrieve data.
            temp_size = size_or_code
        else:
            break

    check_for_error(size_or_code)

    return val.tolist()

def ai_current_rngs(self):
    """
    List[float]: Indicates the pairs of current input ranges
        supported by this device. Each pair consists of the low
        value, followed by the high value.
    """
    cfunc = lib_importer.windll.DAQmxGetDevAICurrentRngs
    if cfunc.argtypes is None:
        with cfunc.arglock:
            if cfunc.argtypes is None:
                cfunc.argtypes = [
                    ctypes_byte_str,
                    wrapped_ndpointer(dtype=numpy.float64, flags=('C','W')),
                    ctypes.c_uint]

    temp_size = 0
    while True:
        val = numpy.zeros(temp_size, dtype=numpy.float64)

        size_or_code = cfunc(
            self._name, val, temp_size)

        if is_array_buffer_too_small(size_or_code):
            # Buffer size must have changed between calls; check again.
            temp_size = 0
        elif size_or_code > 0 and temp_size == 0:
            # Buffer size obtained, use to retrieve data.
            temp_size = size_or_code
        else:
            break

    check_for_error(size_or_code)

    return val.tolist()

def ai_dig_fltr_lowpass_cutoff_freq_discrete_vals(self):
    """
    List[float]: Indicates the set of discrete lowpass cutoff
        frequencies supported by this device. If the device supports
        ranges of lowpass cutoff frequencies, use
        AI.DigFltr.Lowpass.CutoffFreq.RangeVals to determine
        supported frequencies.
    """
    cfunc = (lib_importer.windll.
             DAQmxGetDevAIDigFltrLowpassCutoffFreqDiscreteVals)
    if cfunc.argtypes is None:
        with cfunc.arglock:
            if cfunc.argtypes is None:
                cfunc.argtypes = [
                    ctypes_byte_str,
                    wrapped_ndpointer(dtype=numpy.float64, flags=('C','W')),
                    ctypes.c_uint]

    temp_size = 0
    while True:
        val = numpy.zeros(temp_size, dtype=numpy.float64)

        size_or_code = cfunc(
            self._name, val, temp_size)

        if is_array_buffer_too_small(size_or_code):
            # Buffer size must have changed between calls; check again.
            temp_size = 0
        elif size_or_code > 0 and temp_size == 0:
            # Buffer size obtained, use to retrieve data.
            temp_size = size_or_code
        else:
            break

    check_for_error(size_or_code)

    return val.tolist()

def ai_dig_fltr_lowpass_cutoff_freq_range_vals(self):
    """
    List[float]: Indicates pairs of lowpass cutoff frequency ranges
        supported by this device. Each pair consists of the low
        value, followed by the high value. If the device supports a
        set of discrete lowpass cutoff frequencies, use
        AI.DigFltr.Lowpass.CutoffFreq.DiscreteVals to determine the
        supported frequencies.
    """
    cfunc = (lib_importer.windll.
             DAQmxGetDevAIDigFltrLowpassCutoffFreqRangeVals)
    if cfunc.argtypes is None:
        with cfunc.arglock:
            if cfunc.argtypes is None:
                cfunc.argtypes = [
                    ctypes_byte_str,
                    wrapped_ndpointer(dtype=numpy.float64, flags=('C','W')),
                    ctypes.c_uint]

    temp_size = 0
    while True:
        val = numpy.zeros(temp_size, dtype=numpy.float64)

        size_or_code = cfunc(
            self._name, val, temp_size)

        if is_array_buffer_too_small(size_or_code):
            # Buffer size must have changed between calls; check again.
            temp_size = 0
        elif size_or_code > 0 and temp_size == 0:
            # Buffer size obtained, use to retrieve data.
            temp_size = size_or_code
        else:
            break

    check_for_error(size_or_code)

    return val.tolist()

def ai_freq_rngs(self):
    """
    List[float]: Indicates the pairs of frequency input ranges
        supported by this device. Each pair consists of the low
        value, followed by the high value.
    """
    cfunc = lib_importer.windll.DAQmxGetDevAIFreqRngs
    if cfunc.argtypes is None:
        with cfunc.arglock:
            if cfunc.argtypes is None:
                cfunc.argtypes = [
                    ctypes_byte_str,
                    wrapped_ndpointer(dtype=numpy.float64, flags=('C','W')),
                    ctypes.c_uint]

    temp_size = 0
    while True:
        val = numpy.zeros(temp_size, dtype=numpy.float64)

        size_or_code = cfunc(
            self._name, val, temp_size)

        if is_array_buffer_too_small(size_or_code):
            # Buffer size must have changed between calls; check again.
            temp_size = 0
        elif size_or_code > 0 and temp_size == 0:
            # Buffer size obtained, use to retrieve data.
            temp_size = size_or_code
        else:
            break

    check_for_error(size_or_code)

    return val.tolist()

def ai_lowpass_cutoff_freq_discrete_vals(self):
    """
    List[float]: Indicates the set of discrete lowpass cutoff
        frequencies supported by this device. If the device supports
        ranges of lowpass cutoff frequencies, use
        **ai_lowpass_cutoff_freq_range_vals** to determine supported
        frequencies.
    """
    cfunc = (lib_importer.windll.
             DAQmxGetDevAILowpassCutoffFreqDiscreteVals)
    if cfunc.argtypes is None:
        with cfunc.arglock:
            if cfunc.argtypes is None:
                cfunc.argtypes = [
                    ctypes_byte_str,
                    wrapped_ndpointer(dtype=numpy.float64, flags=('C','W')),
                    ctypes.c_uint]

    temp_size = 0
    while True:
        val = numpy.zeros(temp_size, dtype=numpy.float64)

        size_or_code = cfunc(
            self._name, val, temp_size)

        if is_array_buffer_too_small(size_or_code):
            # Buffer size must have changed between calls; check again.
            temp_size = 0
        elif size_or_code > 0 and temp_size == 0:
            # Buffer size obtained, use to retrieve data.
            temp_size = size_or_code
        else:
            break

    check_for_error(size_or_code)

    return val.tolist()

def ai_resistance_rngs(self):
    """
    List[float]: Indicates pairs of input resistance ranges, in ohms,
        supported by devices that have the necessary signal
        conditioning to measure resistances. Each pair consists of
        the low value followed by the high value.
    """
    cfunc = lib_importer.windll.DAQmxGetDevAIResistanceRngs
    if cfunc.argtypes is None:
        with cfunc.arglock:
            if cfunc.argtypes is None:
                cfunc.argtypes = [
                    ctypes_byte_str,
                    wrapped_ndpointer(dtype=numpy.float64, flags=('C','W')),
                    ctypes.c_uint]

    temp_size = 0
    while True:
        val = numpy.zeros(temp_size, dtype=numpy.float64)

        size_or_code = cfunc(
            self._name, val, temp_size)

        if is_array_buffer_too_small(size_or_code):
            # Buffer size must have changed between calls; check again.
            temp_size = 0
        elif size_or_code > 0 and temp_size == 0:
            # Buffer size obtained, use to retrieve data.
            temp_size = size_or_code
        else:
            break

    check_for_error(size_or_code)

    return val.tolist()

def ai_voltage_int_excit_discrete_vals(self):
    """
    List[float]: Indicates the set of discrete internal voltage
        excitation values supported by this device. If the device
        supports ranges of internal excitation values, use
        **ai_voltage_int_excit_range_vals** to determine supported
        excitation values.
    """
    cfunc = lib_importer.windll.DAQmxGetDevAIVoltageIntExcitDiscreteVals
    if cfunc.argtypes is None:
        with cfunc.arglock:
            if cfunc.argtypes is None:
                cfunc.argtypes = [
                    ctypes_byte_str,
                    wrapped_ndpointer(dtype=numpy.float64, flags=('C','W')),
                    ctypes.c_uint]

    temp_size = 0
    while True:
        val = numpy.zeros(temp_size, dtype=numpy.float64)

        size_or_code = cfunc(
            self._name, val, temp_size)

        if is_array_buffer_too_small(size_or_code):
            # Buffer size must have changed between calls; check again.
            temp_size = 0
        elif size_or_code > 0 and temp_size == 0:
            # Buffer size obtained, use to retrieve data.
            temp_size = size_or_code
        else:
            break

    check_for_error(size_or_code)

    return val.tolist()

def ai_voltage_int_excit_range_vals(self):
    """
    List[float]: Indicates pairs of internal voltage excitation
        ranges supported by this device. Each pair consists of the
        low value, followed by the high value. If the device supports
        a set of discrete internal excitation values, use
        **ai_voltage_int_excit_discrete_vals** to determine the
        supported excitation values.
    """
    cfunc = lib_importer.windll.DAQmxGetDevAIVoltageIntExcitRangeVals
    if cfunc.argtypes is None:
        with cfunc.arglock:
            if cfunc.argtypes is None:
                cfunc.argtypes = [
                    ctypes_byte_str,
                    wrapped_ndpointer(dtype=numpy.float64, flags=('C','W')),
                    ctypes.c_uint]

    temp_size = 0
    while True:
        val = numpy.zeros(temp_size, dtype=numpy.float64)

        size_or_code = cfunc(
            self._name, val, temp_size)

        if is_array_buffer_too_small(size_or_code):
            # Buffer size must have changed between calls; check again.
            temp_size = 0
        elif size_or_code > 0 and temp_size == 0:
            # Buffer size obtained, use to retrieve data.
            temp_size = size_or_code
        else:
            break

    check_for_error(size_or_code)

    return val.tolist()

def ao_current_rngs(self):
    """
    List[float]: Indicates pairs of output current ranges supported
        by this device. Each pair consists of the low value, followed
        by the high value.
    """
    cfunc = lib_importer.windll.DAQmxGetDevAOCurrentRngs
    if cfunc.argtypes is None:
        with cfunc.arglock:
            if cfunc.argtypes is None:
                cfunc.argtypes = [
                    ctypes_byte_str,
                    wrapped_ndpointer(dtype=numpy.float64, flags=('C','W')),
                    ctypes.c_uint]

    temp_size = 0
    while True:
        val = numpy.zeros(temp_size, dtype=numpy.float64)

        size_or_code = cfunc(
            self._name, val, temp_size)

        if is_array_buffer_too_small(size_or_code):
            # Buffer size must have changed between calls; check again.
            temp_size = 0
        elif size_or_code > 0 and temp_size == 0:
            # Buffer size obtained, use to retrieve data.
            temp_size = size_or_code
        else:
            break

    check_for_error(size_or_code)

    return val.tolist()

def ao_gains(self):
    """
    List[float]: Indicates the output gain settings supported by this
        device.
    """
    cfunc = lib_importer.windll.DAQmxGetDevAOGains
    if cfunc.argtypes is None:
        with cfunc.arglock:
            if cfunc.argtypes is None:
                cfunc.argtypes = [
                    ctypes_byte_str,
                    wrapped_ndpointer(dtype=numpy.float64, flags=('C','W')),
                    ctypes.c_uint]

    temp_size = 0
    while True:
        val = numpy.zeros(temp_size, dtype=numpy.float64)

        size_or_code = cfunc(
            self._name, val, temp_size)

        if is_array_buffer_too_small(size_or_code):
            # Buffer size must have changed between calls; check again.
            temp_size = 0
        elif size_or_code > 0 and temp_size == 0:
            # Buffer size obtained, use to retrieve data.
            temp_size = size_or_code
        else:
            break

    check_for_error(size_or_code)

    return val.tolist()

def ai_bridge_poly_forward_coeff(self, val):
    val = numpy.float64(val)

    cfunc = lib_importer.windll.DAQmxSetAIBridgePolyForwardCoeff
    if cfunc.argtypes is None:
        with cfunc.arglock:
            if cfunc.argtypes is None:
                cfunc.argtypes = [
                    lib_importer.task_handle, ctypes_byte_str,
                    wrapped_ndpointer(dtype=numpy.float64, flags=('C','W')),
                    ctypes.c_uint]

    error_code = cfunc(
        self._handle, self._name, val, len(val))
    check_for_error(error_code)

def ai_bridge_poly_reverse_coeff(self):
    """
    List[float]: Specifies a list of coefficients for the polynomial
        that converts physical values to electrical values. Each
        element of the list corresponds to a term of the equation.
        For example, if index three of the list is 9, the fourth term
        of the equation is 9x^3.
    """
    cfunc = lib_importer.windll.DAQmxGetAIBridgePolyReverseCoeff
    if cfunc.argtypes is None:
        with cfunc.arglock:
            if cfunc.argtypes is None:
                cfunc.argtypes = [
                    lib_importer.task_handle, ctypes_byte_str,
                    wrapped_ndpointer(dtype=numpy.float64, flags=('C','W')),
                    ctypes.c_uint]

    temp_size = 0
    while True:
        val = numpy.zeros(temp_size, dtype=numpy.float64)

        size_or_code = cfunc(
            self._handle, self._name, val, temp_size)

        if is_array_buffer_too_small(size_or_code):
            # Buffer size must have changed between calls; check again.
            temp_size = 0
        elif size_or_code > 0 and temp_size == 0:
            # Buffer size obtained, use to retrieve data.
            temp_size = size_or_code
        else:
            break

    check_for_error(size_or_code)

    return val.tolist()