The following 36 code examples, extracted from open-source Python projects, illustrate how to use numpy.promote_types().
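Before the examples, here is a minimal sketch of what numpy.promote_types() does: it returns the smallest dtype to which both of its arguments can be safely cast (expected results shown in the comments).

import numpy as np

# int32 cannot be cast to float32 without losing precision,
# so the promotion goes up to float64:
np.promote_types('i4', 'f4')   # dtype('float64')

# signed and unsigned 8-bit integers need 16 bits to hold both ranges:
np.promote_types('u1', 'i1')   # dtype('int16')

# bytes and unicode strings promote to unicode, keeping the larger width:
np.promote_types('S5', 'U8')   # dtype('U8')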
def test_promote_types_endian(self):
    # promote_types should always return native-endian types
    assert_equal(np.promote_types('<i8', '<i8'), np.dtype('i8'))
    assert_equal(np.promote_types('>i8', '>i8'), np.dtype('i8'))

    assert_equal(np.promote_types('>i8', '>U16'), np.dtype('U21'))
    assert_equal(np.promote_types('<i8', '<U16'), np.dtype('U21'))
    assert_equal(np.promote_types('>U16', '>i8'), np.dtype('U21'))
    assert_equal(np.promote_types('<U16', '<i8'), np.dtype('U21'))

    assert_equal(np.promote_types('<S5', '<U8'), np.dtype('U8'))
    assert_equal(np.promote_types('>S5', '>U8'), np.dtype('U8'))
    assert_equal(np.promote_types('<U8', '<S5'), np.dtype('U8'))
    assert_equal(np.promote_types('>U8', '>S5'), np.dtype('U8'))
    assert_equal(np.promote_types('<U5', '<U8'), np.dtype('U8'))
    assert_equal(np.promote_types('>U8', '>U5'), np.dtype('U8'))

    assert_equal(np.promote_types('<M8', '<M8'), np.dtype('M8'))
    assert_equal(np.promote_types('>M8', '>M8'), np.dtype('M8'))
    assert_equal(np.promote_types('<m8', '<m8'), np.dtype('m8'))
    assert_equal(np.promote_types('>m8', '>m8'), np.dtype('m8'))
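A quick way to see the property this test exercises: even when the inputs are byte-swapped, the promoted dtype reports itself as native.

import numpy as np

# mixing byte orders still yields a native-endian result:
np.promote_types('>i8', '<i8').isnative   # True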
def testGram(test):
    instance, reference = test[TEST.INSTANCE], test[TEST.REFERENCE]

    # usually expect the normalized matrix to be promoted in type complexity
    # due to division by column-norm during the process. However there exist
    # matrices that treat the problem differently. Exclude the expected
    # promotion for them.
    query = ({} if isinstance(instance, (Diag, Eye, Zero))
             else {TEST.TYPE_PROMOTION: np.float32})

    # account for "extra computation stage" in gram
    query[TEST.TOL_POWER] = test.get(TEST.TOL_POWER, 1.) * 2

    query[TEST.RESULT_OUTPUT] = instance.gram.array
    query[TEST.RESULT_REF] = reference.astype(
        np.promote_types(np.float32, reference.dtype)).T.conj().dot(reference)

    # ignore actual type of generated gram:
    query[TEST.CHECK_DATATYPE] = False

    return compareResults(test, query)


################################################## test: T (property)
def test_jacobian_set_item(self, dtypes, shapes):
    shape, constructor, expected_shape = shapes
    dtype, value = dtypes

    prob = Problem(model=Group())
    comp = ExplicitSetItemComp(dtype, value, shape, constructor)
    prob.model.add_subsystem('C1', comp)
    prob.setup(check=False)

    prob.set_solver_print(level=0)
    prob.run_model()
    prob.model.run_apply_nonlinear()
    prob.model.run_linearize()

    expected = constructor(value)
    with prob.model._subsystems_allprocs[0].jacobian_context() as J:
        jac_out = J['out', 'in'] * -1

    self.assertEqual(len(jac_out.shape), 2)
    expected_dtype = np.promote_types(dtype, float)
    self.assertEqual(jac_out.dtype, expected_dtype)
    assert_rel_error(self, jac_out,
                     np.atleast_2d(expected).reshape(expected_shape), 1e-15)
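The expected_dtype line relies on a handy property of promote_types: pairing any dtype with the Python float type keeps complex inputs complex while lifting integers to a floating type. A minimal illustration:

import numpy as np

np.promote_types(np.complex64, float)   # dtype('complex128')
np.promote_types(np.int32, float)       # dtype('float64')
np.promote_types(np.float32, float)     # dtype('float64')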
def test_promote_types_strings(self):
    assert_equal(np.promote_types('bool', 'S'), np.dtype('S5'))
    assert_equal(np.promote_types('b', 'S'), np.dtype('S4'))
    assert_equal(np.promote_types('u1', 'S'), np.dtype('S3'))
    assert_equal(np.promote_types('u2', 'S'), np.dtype('S5'))
    assert_equal(np.promote_types('u4', 'S'), np.dtype('S10'))
    assert_equal(np.promote_types('u8', 'S'), np.dtype('S20'))
    assert_equal(np.promote_types('i1', 'S'), np.dtype('S4'))
    assert_equal(np.promote_types('i2', 'S'), np.dtype('S6'))
    assert_equal(np.promote_types('i4', 'S'), np.dtype('S11'))
    assert_equal(np.promote_types('i8', 'S'), np.dtype('S21'))

    assert_equal(np.promote_types('bool', 'U'), np.dtype('U5'))
    assert_equal(np.promote_types('b', 'U'), np.dtype('U4'))
    assert_equal(np.promote_types('u1', 'U'), np.dtype('U3'))
    assert_equal(np.promote_types('u2', 'U'), np.dtype('U5'))
    assert_equal(np.promote_types('u4', 'U'), np.dtype('U10'))
    assert_equal(np.promote_types('u8', 'U'), np.dtype('U20'))
    assert_equal(np.promote_types('i1', 'U'), np.dtype('U4'))
    assert_equal(np.promote_types('i2', 'U'), np.dtype('U6'))
    assert_equal(np.promote_types('i4', 'U'), np.dtype('U11'))
    assert_equal(np.promote_types('i8', 'U'), np.dtype('U21'))

    assert_equal(np.promote_types('bool', 'S1'), np.dtype('S5'))
    assert_equal(np.promote_types('bool', 'S30'), np.dtype('S30'))
    assert_equal(np.promote_types('b', 'S1'), np.dtype('S4'))
    assert_equal(np.promote_types('b', 'S30'), np.dtype('S30'))
    assert_equal(np.promote_types('u1', 'S1'), np.dtype('S3'))
    assert_equal(np.promote_types('u1', 'S30'), np.dtype('S30'))
    assert_equal(np.promote_types('u2', 'S1'), np.dtype('S5'))
    assert_equal(np.promote_types('u2', 'S30'), np.dtype('S30'))
    assert_equal(np.promote_types('u4', 'S1'), np.dtype('S10'))
    assert_equal(np.promote_types('u4', 'S30'), np.dtype('S30'))
    assert_equal(np.promote_types('u8', 'S1'), np.dtype('S20'))
    assert_equal(np.promote_types('u8', 'S30'), np.dtype('S30'))
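The string widths in these assertions are large enough to hold the text representation of any value of the numeric type, e.g. every bool prints as at most 5 characters ('False'). A quick sanity check:

import numpy as np

len(str(False))                # 5, matching promote_types('bool', 'S') == 'S5'
len(str(np.iinfo('i4').min))   # 11 ('-2147483648')
np.promote_types('i4', 'S')    # dtype('S11')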
def test_dtype_promotion(self):
    # datetime <op> datetime computes the metadata gcd
    # timedelta <op> timedelta computes the metadata gcd
    for mM in ['m', 'M']:
        assert_equal(
            np.promote_types(np.dtype(mM+'8[2Y]'), np.dtype(mM+'8[2Y]')),
            np.dtype(mM+'8[2Y]'))
        assert_equal(
            np.promote_types(np.dtype(mM+'8[12Y]'), np.dtype(mM+'8[15Y]')),
            np.dtype(mM+'8[3Y]'))
        assert_equal(
            np.promote_types(np.dtype(mM+'8[62M]'), np.dtype(mM+'8[24M]')),
            np.dtype(mM+'8[2M]'))
        assert_equal(
            np.promote_types(np.dtype(mM+'8[1W]'), np.dtype(mM+'8[2D]')),
            np.dtype(mM+'8[1D]'))
        assert_equal(
            np.promote_types(np.dtype(mM+'8[W]'), np.dtype(mM+'8[13s]')),
            np.dtype(mM+'8[s]'))
        assert_equal(
            np.promote_types(np.dtype(mM+'8[13W]'), np.dtype(mM+'8[49s]')),
            np.dtype(mM+'8[7s]'))

    # timedelta <op> timedelta raises when there is no reasonable gcd
    assert_raises(TypeError, np.promote_types,
                  np.dtype('m8[Y]'), np.dtype('m8[D]'))
    assert_raises(TypeError, np.promote_types,
                  np.dtype('m8[M]'), np.dtype('m8[W]'))

    # timedelta <op> timedelta may overflow with big unit ranges
    assert_raises(OverflowError, np.promote_types,
                  np.dtype('m8[W]'), np.dtype('m8[fs]'))
    assert_raises(OverflowError, np.promote_types,
                  np.dtype('m8[s]'), np.dtype('m8[as]'))
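The promoted units above are the greatest common divisors of the input units, after converting to a common base unit where one exists. Checking a few of the cases by hand:

import math

math.gcd(12, 15)               # 3 -> [12Y] and [15Y] promote to [3Y]
math.gcd(7, 2)                 # 1 -> [1W] (7 days) and [2D] promote to [1D]
math.gcd(13 * 7 * 86400, 49)   # 7 -> [13W] in seconds and [49s] promote to [7s]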
def print_coercion_table(ntypes, inputfirstvalue, inputsecondvalue,
                         firstarray, use_promote_types=False):
    print('+', end=' ')
    for char in ntypes:
        print(char, end=' ')
    print()
    for row in ntypes:
        if row == 'O':
            rowtype = GenericObject
        else:
            rowtype = np.obj2sctype(row)

        print(row, end=' ')
        for col in ntypes:
            if col == 'O':
                coltype = GenericObject
            else:
                coltype = np.obj2sctype(col)
            try:
                if firstarray:
                    rowvalue = np.array([rowtype(inputfirstvalue)], dtype=rowtype)
                else:
                    rowvalue = rowtype(inputfirstvalue)
                colvalue = coltype(inputsecondvalue)
                if use_promote_types:
                    char = np.promote_types(rowvalue.dtype, colvalue.dtype).char
                else:
                    value = np.add(rowvalue, colvalue)
                    if isinstance(value, np.ndarray):
                        char = value.dtype.char
                    else:
                        char = np.dtype(type(value)).char
            except ValueError:
                char = '!'
            except OverflowError:
                char = '@'
            except TypeError:
                char = '#'
            print(char, end=' ')
        print()
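A hypothetical invocation of the helper above (the type-code strings come from np.typecodes; 'O' is omitted so the GenericObject helper defined elsewhere in the module is not needed):

import numpy as np

# print the promote_types table for all integer and float type codes:
print_coercion_table(np.typecodes['AllInteger'] + np.typecodes['AllFloat'],
                     0, 0, firstarray=False, use_promote_types=True)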
def dtype(self):
    """Return dtype of image data in file."""
    # subblock data can be of different pixel type
    dtype = self.filtered_subblock_directory[0].dtype[-2:]
    for directory_entry in self.filtered_subblock_directory:
        dtype = numpy.promote_types(dtype, directory_entry.dtype[-2:])
    return dtype
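The loop above is the fold-over-promote_types pattern for finding one dtype that can hold a heterogeneous collection. A self-contained sketch of the same idea:

import functools
import numpy as np

dtypes = [np.dtype('u1'), np.dtype('u2'), np.dtype('f4')]
common = functools.reduce(np.promote_types, dtypes)
print(common)   # float32, wide enough for every input dtype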
def testLargestSV(test):
    query = {TEST.TYPE_EXPECTED: np.float64}
    instance = test[TEST.INSTANCE]

    # account for "extra computation stage" (gram) in largestSV
    query[TEST.TOL_POWER] = test.get(TEST.TOL_POWER, 1.) * 2
    query[TEST.TOL_MINEPS] = _getTypeEps(safeTypeExpansion(instance.dtype))

    # determine reference result
    largestSV = np.linalg.svd(test[TEST.REFERENCE], compute_uv=False)[0]
    query[TEST.RESULT_REF] = np.array(
        largestSV, dtype=np.promote_types(largestSV.dtype, np.float64))

    # largestSV may not converge fast enough for a bad random starting point,
    # so retry a few times before giving up
    for tries in range(9):
        maxSteps = 100. * 10. ** (tries / 2.)
        query[TEST.RESULT_OUTPUT] = np.array(
            instance.getLargestSV(maxSteps=maxSteps, alwaysReturn=True))
        result = compareResults(test, query)
        if result[TEST.RESULT]:
            break

    return result


################################################## test: gram (property)
def combine_data_frame_files(output_filename, input_filenames):
    in_files = [h5py.File(f, 'r') for f in input_filenames]
    column_names = [tuple(sorted(f.attrs.get("column_names"))) for f in in_files]
    uniq = set(column_names)

    if len(uniq) > 1:
        raise Exception("you're attempting to combine incompatible data frames")

    if len(uniq) == 0:
        r = "No input files? output: %s, inputs: %s" % (output_filename, str(input_filenames))
        raise Exception(r)

    column_names = uniq.pop()

    if os.path.exists(output_filename):
        os.remove(output_filename)

    out = h5py.File(output_filename)
    out.attrs.create("column_names", column_names)

    # Write successive columns
    for c in column_names:
        datasets = [f[c] for f in in_files if len(f[c]) > 0]

        num_w_levels = np.sum([has_levels(ds) for ds in datasets if len(ds) > 0])
        fract_w_levels = float(num_w_levels) / (len(datasets) + 1)

        if fract_w_levels > 0.25:
            combine_level_column(out, datasets, c)
            continue

        # filter out empty rows from the type promotion, unless they're all empty
        types = [get_col_type(ds) for ds in datasets if len(ds) > 0]
        if len(types) == 0:
            # Fall back to getting column types from empty data frames
            types = [get_col_type(f[c]) for f in in_files]

        common_type = reduce(np.promote_types, types)

        # numpy doesn't understand vlen strings -- so always promote to vlen
        # strings if anything is using them
        if vlen_string in types:
            common_type = vlen_string

        out_ds = out.create_dataset(c, shape=(0,), maxshape=(None,),
                                    dtype=common_type, compression=COMPRESSION,
                                    shuffle=True, chunks=(CHUNK_SIZE,))

        item_count = 0
        for ds in datasets:
            new_items = ds.shape[0]
            out_ds.resize((item_count + new_items,))

            data = ds[:]
            if has_levels(ds):
                levels = get_levels(ds)
                data = levels[data]

            out_ds[item_count:(item_count + new_items)] = data
            item_count += new_items

    for in_f in in_files:
        in_f.close()
    out.close()
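The vlen_string sentinel compared against above is not defined in this excerpt; in h5py-based code it is typically the variable-length string dtype, along these lines (an assumption, not shown in the original):

import h5py

# hypothetical definition of the vlen_string sentinel used above:
vlen_string = h5py.special_dtype(vlen=str)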
def ISTA(fmatA, arrB, numLambda=0.1, numMaxSteps=100):
    '''
    Wrapper around the ISTA algorithm to allow processing of arrays of signals

        fmatA       - input system matrix
        arrB        - input data vector (measurements)
        numLambda   - balancing parameter in optimization problem
                      between data fidelity and sparsity
        numMaxSteps - maximum number of steps to run
        numL        - step size during the conjugate gradient step
    '''
    if len(arrB.shape) > 2:
        raise ValueError("Only n x m arrays are supported for ISTA")

    # calculate the largest singular value to get the right step size
    numL = 1.0 / (fmatA.largestSV ** 2)
    arrX = np.zeros(
        (fmatA.numM, arrB.shape[1]),
        dtype=np.promote_types(np.float32, arrB.dtype)
    )

    # start iterating
    for numStep in range(numMaxSteps):
        # do the gradient step and threshold
        arrStep = arrX - numL * fmatA.backward(fmatA.forward(arrX) - arrB)
        arrX = _softThreshold(arrStep, numL * numLambda * 0.5)

    # return the unthresholded values for all non-zero support elements
    return np.where(arrX != 0, arrStep, arrX)


################################################################################
###  Maintenance and Documentation
################################################################################

################################################## inspection interface
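The _softThreshold helper is referenced here and in FISTA below but not shown in this excerpt. A minimal sketch of a conventional soft-thresholding (shrinkage) operator that would fit, assuming this is what the original implements:

import numpy as np

def _softThreshold(arrX, numAlpha):
    # classic shrinkage operator: move each entry numAlpha towards zero and
    # zero out anything whose magnitude falls below the threshold; working
    # with the magnitude keeps the expression valid for complex inputs too
    arrMag = np.abs(arrX)
    scale = np.maximum(arrMag - numAlpha, 0) / np.maximum(arrMag, 1e-30)
    return arrX * scale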
def FISTA(fmatA, arrB, numLambda=0.1, numMaxSteps=100):
    '''
    Wrapper around the FISTA algorithm to allow processing of arrays of signals

        fmatA       - input system matrix
        arrB        - input data vector (measurements)
        numLambda   - balancing parameter in optimization problem
                      between data fidelity and sparsity
        numMaxSteps - maximum number of steps to run
        numL        - step size during the conjugate gradient step
    '''
    if len(arrB.shape) > 2:
        raise ValueError("Only n x m arrays are supported for FISTA")

    # calculate the largest singular value to get the right step size
    numL = 1.0 / (fmatA.largestSV ** 2)
    t = 1
    arrX = np.zeros(
        (fmatA.numM, arrB.shape[1]),
        dtype=np.promote_types(np.float32, arrB.dtype)
    )

    # initial arrY
    arrY = np.copy(arrX)

    # start iterating
    for numStep in range(numMaxSteps):
        arrXold = np.copy(arrX)

        # do the gradient step and threshold
        arrStep = arrY - numL * fmatA.backward(fmatA.forward(arrY) - arrB)
        arrX = _softThreshold(arrStep, numL * numLambda * 0.5)

        # update t
        tOld = t
        t = (1 + np.sqrt(1 + 4 * t ** 2)) / 2

        # update arrY
        arrY = arrX + ((tOld - 1) / t) * (arrX - arrXold)

    # return the unthresholded values for all non-zero support elements
    return np.where(arrX != 0, arrStep, arrX)


################################################################################
###  Maintenance and Documentation
################################################################################

################################################## inspection interface
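A hypothetical end-to-end call of the two solvers above, assuming the fastmat package provides the Matrix wrapper with the forward/backward/largestSV interface these functions expect:

import numpy as np
import fastmat as fm

# random measurement operator and synthetic measurements (one column of signals)
A = fm.Matrix(np.random.randn(64, 128))
b = A.forward(np.random.randn(128, 1))

xIsta = ISTA(A, b, numLambda=0.1, numMaxSteps=100)
xFista = FISTA(A, b, numLambda=0.1, numMaxSteps=100)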
def _set_abs(self, abs_key, subjac):
    """
    Set sub-Jacobian.

    Parameters
    ----------
    abs_key : (str, str)
        Absolute name pair of sub-Jacobian.
    subjac : int or float or ndarray or sparse matrix
        sub-Jacobian as a scalar, vector, array, or AIJ list or tuple.
    """
    if not issparse(subjac):
        # np.promote_types will choose the smallest dtype that can contain both arguments
        subjac = np.atleast_1d(subjac)
        safe_dtype = np.promote_types(subjac.dtype, float)
        subjac = subjac.astype(safe_dtype, copy=False)

        # Bail here so that we allow top level jacobians to be of reduced size when indices
        # are specified on driver vars.
        if self._override_checks:
            self._subjacs[abs_key] = subjac
            return

        if abs_key in self._subjacs_info:
            subjac_info = self._subjacs_info[abs_key][0]
            rows = subjac_info['rows']
        else:
            rows = None

        if rows is None:
            # Dense subjac
            shape = self._abs_key2shape(abs_key)
            subjac = np.atleast_2d(subjac)
            if subjac.shape == (1, 1):
                subjac = subjac[0, 0] * np.ones(shape, dtype=safe_dtype)
            else:
                subjac = subjac.reshape(shape)

            if abs_key in self._subjacs and self._subjacs[abs_key].shape == shape:
                np.copyto(self._subjacs[abs_key], subjac)
            else:
                self._subjacs[abs_key] = subjac.copy()
        else:
            # Sparse subjac
            if subjac.shape == (1,):
                subjac = subjac[0] * np.ones(rows.shape, dtype=safe_dtype)

            if subjac.shape != rows.shape:
                raise ValueError("Sub-jacobian for key %s has "
                                 "the wrong shape (%s), expected (%s)." %
                                 (abs_key, subjac.shape, rows.shape))

            if abs_key in self._subjacs and subjac.shape == self._subjacs[abs_key][0].shape:
                np.copyto(self._subjacs[abs_key][0], subjac)
            else:
                self._subjacs[abs_key] = [subjac.copy(), rows, subjac_info['cols']]
    else:
        self._subjacs[abs_key] = subjac