The following 49 code examples, extracted from Python open source projects, illustrate how to use tensorflow.python.framework.dtypes.float64().
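Before the extracted examples, here is a minimal illustrative sketch of the basic pattern (it is not one of the 49 examples below, and the tensor names and values are invented for illustration): `dtypes.float64` is passed wherever an op or constant accepts a `dtype`, often together with a cast from another float type.

```python
# Minimal sketch (not from the extracted examples): create a float32 tensor,
# cast it to float64, and do float64 arithmetic. Uses the same TF 1.x-style
# internal imports as the examples below.
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import math_ops

x = constant_op.constant([1.0, 2.0, 3.0], dtype=dtypes.float32)
x64 = math_ops.cast(x, dtypes.float64)            # tensor now has dtype float64
half = constant_op.constant(0.5, dtype=dtypes.float64)
y = x64 * half                                    # float64 arithmetic throughout
```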
def set_floatx(value):
  """Sets the default float type.

  Arguments:
      value: String; 'float16', 'float32', or 'float64'.

  Example:
  ```python
      >>> from keras import backend as K
      >>> K.floatx()
      'float32'
      >>> K.set_floatx('float16')
      >>> K.floatx()
      'float16'
  ```

  Raises:
      ValueError: In case of invalid value.
  """
  global _FLOATX
  if value not in {'float16', 'float32', 'float64'}:
    raise ValueError('Unknown floatx type: ' + str(value))
  _FLOATX = str(value)
def cast_to_floatx(x):
  """Cast a Numpy array to the default Keras float type.

  Arguments:
      x: Numpy array.

  Returns:
      The same Numpy array, cast to its new type.

  Example:
  ```python
      >>> from keras import backend as K
      >>> K.floatx()
      'float32'
      >>> arr = numpy.array([1.0, 2.0], dtype='float64')
      >>> arr.dtype
      dtype('float64')
      >>> new_arr = K.cast_to_floatx(arr)
      >>> new_arr
      array([ 1.,  2.], dtype=float32)
      >>> new_arr.dtype
      dtype('float32')
  ```
  """
  return np.asarray(x, dtype=_FLOATX)
def _preprocess_conv2d_input(x, data_format):
  """Transpose and cast the input before the conv2d.

  Arguments:
      x: input tensor.
      data_format: string, one of 'channels_last', 'channels_first'.

  Returns:
      A tensor.
  """
  if dtype(x) == 'float64':
    x = math_ops.cast(x, 'float32')
  if data_format == 'channels_first':
    # TF uses the last dimension as channel dimension,
    # instead of the 2nd one.
    # TH input shape: (samples, input_depth, rows, cols)
    # TF input shape: (samples, rows, cols, input_depth)
    x = array_ops.transpose(x, (0, 2, 3, 1))
  return x
def _postprocess_conv3d_output(x, data_format):
  """Transpose and cast the output from conv3d if needed.

  Arguments:
      x: A tensor.
      data_format: string, one of "channels_last", "channels_first".

  Returns:
      A tensor.
  """
  if data_format == 'channels_first':
    x = array_ops.transpose(x, (0, 4, 1, 2, 3))
  if floatx() == 'float64':
    x = math_ops.cast(x, 'float64')
  return x
def approximate_duality_gap(self):
  """Add operations to compute the approximate duality gap.

  Returns:
    An Operation that computes the approximate duality gap over all
    examples.
  """
  with name_scope('sdca/approximate_duality_gap'):
    _, values_list = self._hashtable.export_sharded()
    shard_sums = []
    for values in values_list:
      with ops.device(values.device):
        shard_sums.append(
            math_ops.reduce_sum(math_ops.cast(values, dtypes.float64), 0))
    summed_values = math_ops.add_n(shard_sums)

    primal_loss = summed_values[1]
    dual_loss = summed_values[2]
    example_weights = summed_values[3]
    # Note: we return NaN if there are no weights or all weights are 0, e.g.
    # if no examples have been processed.
    return (primal_loss + dual_loss + self._l1_loss() +
            (2.0 * self._l2_loss(self._symmetric_l2_regularization()))
           ) / example_weights
def _safe_scalar_div(numerator, denominator, name):
  """Divides two values, returning 0 if the denominator is 0.

  Args:
    numerator: A scalar `float64` `Tensor`.
    denominator: A scalar `float64` `Tensor`.
    name: Name for the returned op.

  Returns:
    0 if `denominator` == 0, else `numerator` / `denominator`
  """
  numerator.get_shape().with_rank_at_most(1)
  denominator.get_shape().with_rank_at_most(1)
  return control_flow_ops.cond(
      math_ops.equal(
          array_ops.constant(0.0, dtype=dtypes.float64), denominator),
      lambda: array_ops.constant(0.0, dtype=dtypes.float64),
      lambda: math_ops.div(numerator, denominator),
      name=name)
def regularized_loss(self, examples):
  """Add operations to compute the loss with regularization loss included.

  Args:
    examples: Examples to compute loss on.

  Returns:
    An Operation that computes mean (regularized) loss for given set of
    examples.

  Raises:
    ValueError: if examples are not well defined.
  """
  self._assertSpecified(['example_labels', 'example_weights',
                         'sparse_features', 'dense_features'], examples)
  self._assertList(['sparse_features', 'dense_features'], examples)
  with name_scope('sdca/regularized_loss'):
    weights = convert_to_tensor(examples['example_weights'])
    return ((
        self._l1_loss() +
        # Note that here we are using the raw regularization
        # (as specified by the user) and *not*
        # self._symmetric_l2_regularization().
        self._l2_loss(self._options['symmetric_l2_regularization'])) /
            math_ops.reduce_sum(math_ops.cast(weights, dtypes.float64)) +
            self.unregularized_loss(examples))
def _safe_scalar_div(numerator, denominator, name):
  """Divides two values, returning 0 if the denominator is 0.

  Args:
    numerator: A scalar `float64` `Tensor`.
    denominator: A scalar `float64` `Tensor`.
    name: Name for the returned op.

  Returns:
    0 if `denominator` == 0, else `numerator` / `denominator`
  """
  numerator.get_shape().with_rank_at_most(1)
  denominator.get_shape().with_rank_at_most(1)
  return control_flow_ops.cond(
      math_ops.equal(
          array_ops.constant(0.0, dtype=dtypes.float64), denominator),
      lambda: array_ops.constant(0.0, dtype=dtypes.float64),
      lambda: math_ops.div(numerator, denominator),
      name=name)
def testCompareNearestNeighbor(self):
  input_shape = [1, 5, 6, 3]
  target_height = 8
  target_width = 12
  for nptype in [np.float32, np.float64]:
    for align_corners in [True, False]:
      img_np = np.arange(
          0, np.prod(input_shape), dtype=nptype).reshape(input_shape)
      with self.test_session(use_gpu=True):
        image = constant_op.constant(img_np, shape=input_shape)
        out_op = image_ops.resize_images(
            image, target_height, target_width,
            image_ops.ResizeMethod.NEAREST_NEIGHBOR,
            align_corners=align_corners)
        gpu_val = out_op.eval()
      with self.test_session(use_gpu=False):
        image = constant_op.constant(img_np, shape=input_shape)
        out_op = image_ops.resize_images(
            image, target_height, target_width,
            image_ops.ResizeMethod.NEAREST_NEIGHBOR,
            align_corners=align_corners)
        cpu_val = out_op.eval()
      self.assertAllClose(cpu_val, gpu_val, rtol=1e-5, atol=1e-5)
def test_normal_distribution_second_moment_estimated_correctly(self):
  # Test the importance sampled estimate against an analytical result.
  n = int(1e6)
  with self.test_session():
    mu_p = constant_op.constant([0.0, 0.0], dtype=dtypes.float64)
    mu_q = constant_op.constant([-1.0, 1.0], dtype=dtypes.float64)
    sigma_p = constant_op.constant([1.0, 2 / 3.], dtype=dtypes.float64)
    sigma_q = constant_op.constant([1.0, 1.0], dtype=dtypes.float64)
    p = distributions.Normal(loc=mu_p, scale=sigma_p)
    q = distributions.Normal(loc=mu_q, scale=sigma_q)

    # Compute E_p[X^2].
    # Should equal [1, (2/3)^2].
    log_e_x2 = monte_carlo.expectation_importance_sampler_logspace(
        log_f=lambda x: math_ops.log(math_ops.square(x)),
        log_p=p.log_prob,
        sampling_dist_q=q,
        n=n,
        seed=42)
    e_x2 = math_ops.exp(log_e_x2)

    # Relative tolerance (rtol) chosen 2 times as large as the minimum
    # needed to pass.
    self.assertEqual(p.get_batch_shape(), e_x2.get_shape())
    self.assertAllClose([1., (2 / 3.)**2], e_x2.eval(), rtol=0.02)
def test_with_three_alphas(self):
  with self.test_session():
    for dtype in (dtypes.float32, dtypes.float64):
      alpha_min = constant_op.constant(0.0, dtype=dtype)
      alpha_max = 0.5
      decay_time = 3

      alpha_0 = entropy.renyi_alpha(
          0, decay_time, alpha_min=alpha_min, alpha_max=alpha_max)
      alpha_1 = entropy.renyi_alpha(
          1, decay_time, alpha_min=alpha_min, alpha_max=alpha_max)
      alpha_2 = entropy.renyi_alpha(
          2, decay_time, alpha_min=alpha_min, alpha_max=alpha_max)
      alpha_3 = entropy.renyi_alpha(
          3, decay_time, alpha_min=alpha_min, alpha_max=alpha_max)

      # Alpha should start at alpha_max.
      self.assertAllClose(alpha_max, alpha_0.eval(), atol=1e-5)
      # Alpha should finish at alpha_min.
      self.assertAllClose(alpha_min.eval(), alpha_3.eval(), atol=1e-5)
      # In between, alpha should be monotonically decreasing.
      _assert_monotonic_decreasing(
          [alpha_0.eval(), alpha_1.eval(), alpha_2.eval(), alpha_3.eval()])
def testMapInt64ToFloat(self):
  for float_dtype in [dtypes.float32, dtypes.float64]:
    with self.test_session():
      keys = constant_op.constant([11, 12, 13], dtypes.int64)
      values = constant_op.constant([0.0, 1.1, 2.2], float_dtype)
      default_value = constant_op.constant(-1.5, float_dtype)
      table = lookup.MutableDenseHashTable(
          dtypes.int64, float_dtype, default_value=default_value,
          empty_key=0)
      self.assertAllEqual(0, table.size().eval())

      table.insert(keys, values).run()
      self.assertAllEqual(3, table.size().eval())

      input_string = constant_op.constant([11, 12, 15], dtypes.int64)
      output = table.lookup(input_string)
      self.assertAllEqual([3], output.get_shape())

      result = output.eval()
      self.assertAllClose([0, 1.1, -1.5], result)
def test_odeint_2d_linear(self):
  # Solve the 2D linear differential equation:
  #   dy1 / dt = 3.0 * y1 + 4.0 * y2,
  #   dy2 / dt = -4.0 * y1 + 3.0 * y2,
  #   y1(0) = 0.0,
  #   y2(0) = 1.0.
  # Its analytical solution is
  #   y1 = sin(4.0 * t) * exp(3.0 * t),
  #   y2 = cos(4.0 * t) * exp(3.0 * t).
  matrix = constant_op.constant(
      [[3.0, 4.0], [-4.0, 3.0]], dtype=dtypes.float64)
  func = lambda y, t: math_ops.matmul(matrix, y)

  y0 = constant_op.constant([[0.0], [1.0]], dtype=dtypes.float64)
  t = np.linspace(0.0, 1.0, 11)

  y_solved = odes.odeint(func, y0, t)
  with self.test_session() as sess:
    y_solved = sess.run(y_solved)

  y_true = np.zeros((len(t), 2, 1))
  y_true[:, 0, 0] = np.sin(4.0 * t) * np.exp(3.0 * t)
  y_true[:, 1, 0] = np.cos(4.0 * t) * np.exp(3.0 * t)
  self.assertAllClose(y_true, y_solved, atol=1e-5)
def testContinueTrainingDictionaryInput(self):
  boston = base.load_boston()
  output_dir = tempfile.mkdtemp()
  est = estimator.Estimator(model_fn=linear_model_fn, model_dir=output_dir)
  boston_input = {'input': boston.data}
  float64_target = {'labels': boston.target.astype(np.float64)}
  est.fit(x=boston_input, y=float64_target, steps=50)
  scores = est.evaluate(
      x=boston_input,
      y=float64_target,
      metrics={'MSE': metric_ops.streaming_mean_squared_error})
  del est
  # Create another estimator object with the same output dir.
  est2 = estimator.Estimator(model_fn=linear_model_fn, model_dir=output_dir)

  # Check we can evaluate and predict.
  scores2 = est2.evaluate(
      x=boston_input,
      y=float64_target,
      metrics={'MSE': metric_ops.streaming_mean_squared_error})
  self.assertAllClose(scores2['MSE'], scores['MSE'])
  predictions = np.array(list(est2.predict(x=boston_input)))
  other_score = _sklearn.mean_squared_error(predictions,
                                            float64_target['labels'])
  self.assertAllClose(other_score, scores['MSE'])
def testNonmatchingMuSigmaFailsDynamic(self):
  with self.test_session():
    mu_ph = array_ops.placeholder(dtypes.float64)
    chol_ph = array_ops.placeholder(dtypes.float64)

    mu_v = self._rng.rand(2)
    chol_v, _ = self._random_chol(2, 2, 2)
    mvn = distributions.MultivariateNormalCholesky(
        mu_ph, chol_ph, validate_args=True)
    with self.assertRaisesOpError("mu should have rank 1 less than cov"):
      mvn.mean().eval(feed_dict={mu_ph: mu_v, chol_ph: chol_v})

    mu_v = self._rng.rand(2, 1)
    chol_v, _ = self._random_chol(2, 2, 2)
    mvn = distributions.MultivariateNormalCholesky(
        mu_ph, chol_ph, validate_args=True)
    with self.assertRaisesOpError("mu.shape and cov.shape.*should match"):
      mvn.mean().eval(feed_dict={mu_ph: mu_v, chol_ph: chol_v})
def valid_dtypes():
  """Valid types for loss, variables and gradients.

  Subclasses should override to allow other float types.

  Returns:
    Valid types for loss, variables and gradients.
  """
  return set([dtypes.float16, dtypes.float32, dtypes.float64])
def floatx():
  """Returns the default float type, as a string.

  E.g. 'float16', 'float32', 'float64'.

  Returns:
      String, the current default float type.

  Example:
  ```python
      >>> keras.backend.floatx()
      'float32'
  ```
  """
  return _FLOATX
def _convert_string_dtype(dtype):
  """Get the type from a string.

  Arguments:
      dtype: A string representation of a type.

  Returns:
      The type requested.

  Raises:
      ValueError: if `dtype` is not supported.
  """
  if dtype == 'float16':
    return dtypes_module.float16
  elif dtype == 'float32':
    return dtypes_module.float32
  elif dtype == 'float64':
    return dtypes_module.float64
  elif dtype == 'int16':
    return dtypes_module.int16
  elif dtype == 'int32':
    return dtypes_module.int32
  elif dtype == 'int64':
    return dtypes_module.int64
  elif dtype == 'uint8':
    return dtypes_module.uint8
  elif dtype == 'uint16':
    return dtypes_module.uint16
  else:
    raise ValueError('Unsupported dtype:', dtype)
def variable(value, dtype=None, name=None):
  """Instantiates a variable and returns it.

  Arguments:
      value: Numpy array, initial value of the tensor.
      dtype: Tensor type.
      name: Optional name string for the tensor.

  Returns:
      A variable instance (with Keras metadata included).

  Examples:
  ```python
      >>> from keras import backend as K
      >>> val = np.array([[1, 2], [3, 4]])
      >>> kvar = K.variable(value=val, dtype='float64', name='example_var')
      >>> K.dtype(kvar)
      'float64'
      >>> print(kvar)
      example_var
      >>> kvar.eval()
      array([[ 1.,  2.],
             [ 3.,  4.]])
  ```
  """
  if dtype is None:
    dtype = floatx()
  if hasattr(value, 'tocoo'):
    sparse_coo = value.tocoo()
    indices = np.concatenate((np.expand_dims(sparse_coo.row, 1),
                              np.expand_dims(sparse_coo.col, 1)), 1)
    v = sparse_tensor.SparseTensor(
        indices=indices, values=sparse_coo.data, dense_shape=sparse_coo.shape)
    v._uses_learning_phase = False
    return v
  v = variables_module.Variable(
      value, dtype=_convert_string_dtype(dtype), name=name)
  v._uses_learning_phase = False
  return v
def cast(x, dtype):
  """Casts a tensor to a different dtype and returns it.

  You can cast a Keras variable but it still returns a Keras tensor.

  Arguments:
      x: Keras tensor (or variable).
      dtype: String, either `'float16'`, `'float32'`, or `'float64'`.

  Returns:
      Keras tensor with dtype `dtype`.

  Example:
  ```python
      >>> from keras import backend as K
      >>> input = K.placeholder((2, 3), dtype='float32')
      >>> input
      <tf.Tensor 'Placeholder_2:0' shape=(2, 3) dtype=float32>
      # It doesn't work in-place as below.
      >>> K.cast(input, dtype='float16')
      <tf.Tensor 'Cast_1:0' shape=(2, 3) dtype=float16>
      >>> input
      <tf.Tensor 'Placeholder_2:0' shape=(2, 3) dtype=float32>
      # you need to assign it.
      >>> input = K.cast(input, dtype='float16')
      >>> input
      <tf.Tensor 'Cast_2:0' shape=(2, 3) dtype=float16>
  ```
  """
  return math_ops.cast(x, dtype)


# UPDATES OPS
def _preprocess_conv3d_input(x, data_format):
  """Transpose and cast the input before the conv3d.

  Arguments:
      x: input tensor.
      data_format: string, one of 'channels_last', 'channels_first'.

  Returns:
      A tensor.
  """
  if dtype(x) == 'float64':
    x = math_ops.cast(x, 'float32')
  if data_format == 'channels_first':
    x = array_ops.transpose(x, (0, 2, 3, 4, 1))
  return x
def _preprocess_conv2d_kernel(kernel, data_format):
  """Transpose and cast the kernel before the conv2d.

  Arguments:
      kernel: kernel tensor.
      data_format: string, one of 'channels_last', 'channels_first'.

  Returns:
      A tensor.
  """
  if dtype(kernel) == 'float64':
    kernel = math_ops.cast(kernel, 'float32')
  if data_format == 'channels_first':
    kernel = array_ops.transpose(kernel, (2, 3, 1, 0))
  return kernel
def _preprocess_conv3d_kernel(kernel, data_format):
  """Transpose and cast the kernel before the conv3d.

  Arguments:
      kernel: kernel tensor.
      data_format: string, one of 'channels_last', 'channels_first'.

  Returns:
      A tensor.
  """
  if dtype(kernel) == 'float64':
    kernel = math_ops.cast(kernel, 'float32')
  if data_format == 'channels_first':
    kernel = array_ops.transpose(kernel, (2, 3, 4, 1, 0))
  return kernel
def on_epoch_begin(self, epoch, logs=None):
  if not hasattr(self.model.optimizer, 'lr'):
    raise ValueError('Optimizer must have a "lr" attribute.')
  lr = self.schedule(epoch)
  if not isinstance(lr, (float, np.float32, np.float64)):
    raise ValueError('The output of the "schedule" function '
                     'should be float.')
  K.set_value(self.model.optimizer.lr, lr)
def assert_same_float_dtype(tensors=None, dtype=None):
  """Validate and return float type based on `tensors` and `dtype`.

  For ops such as matrix multiplication, inputs and weights must be of the
  same float type. This function validates that all `tensors` are the same
  type, validates that type is `dtype` (if supplied), and returns the type.
  Type must be `dtypes.float32` or `dtypes.float64`. If neither `tensors` nor
  `dtype` is supplied, default to `dtypes.float32`.

  Args:
    tensors: Tensors of input values. Can include `None` elements, which will
        be ignored.
    dtype: Expected type.

  Returns:
    Validated type.

  Raises:
    ValueError: if neither `tensors` nor `dtype` is supplied, or result is
        not float.
  """
  if tensors:
    dtype = _assert_same_base_type(tensors, dtype)
  if not dtype:
    dtype = dtypes.float32
  elif not dtype.is_floating:
    raise ValueError('Expected float, got %s.' % dtype)
  return dtype
def _l1_loss(self):
  """Computes the (un-normalized) l1 loss of the model."""
  with name_scope('sdca/l1_loss'):
    sums = []
    for name in ['sparse_features_weights', 'dense_features_weights']:
      for weights in self._convert_n_to_tensor(self._variables[name]):
        with ops.device(weights.device):
          sums.append(
              math_ops.reduce_sum(
                  math_ops.abs(math_ops.cast(weights, dtypes.float64))))
    sum = math_ops.add_n(sums)
    # SDCA L1 regularization cost is: l1 * sum(|weights|)
    return self._options['symmetric_l1_regularization'] * sum
def _l2_loss(self, l2):
  """Computes the (un-normalized) l2 loss of the model."""
  with name_scope('sdca/l2_loss'):
    sums = []
    for name in ['sparse_features_weights', 'dense_features_weights']:
      for weights in self._convert_n_to_tensor(self._variables[name]):
        with ops.device(weights.device):
          sums.append(
              math_ops.reduce_sum(
                  math_ops.square(math_ops.cast(weights, dtypes.float64))))
    sum = math_ops.add_n(sums)
    # SDCA L2 regularization cost is: l2 * sum(weights^2) / 2
    return l2 * sum / 2.0
def _check_dtype(dtype):
  if dtypes.as_dtype(dtype) == dtypes.float64:
    logging.warn(
        'float64 is not supported by many models, consider casting to '
        'float32.')
  return dtype
def get_feature_spec(self):
  dtype = self.dtype
  # Convert, because example parser only supports float32, int64 and string.
  if dtype == dtypes.int32:
    dtype = dtypes.int64
  if dtype == dtypes.float64:
    dtype = dtypes.float32
  if self.is_sparse:
    return parsing_ops.VarLenFeature(dtype=dtype)
  return parsing_ops.FixedLenFeature(shape=self.shape[1:], dtype=dtype)
def _interp_evaluate(coefficients, t0, t1, t):
  """Evaluate polynomial interpolation at the given time point.

  Args:
    coefficients: list of Tensor coefficients as created by `interp_fit`.
    t0: scalar float64 Tensor giving the start of the interval.
    t1: scalar float64 Tensor giving the end of the interval.
    t: scalar float64 Tensor giving the desired interpolation point.

  Returns:
    Polynomial interpolation of the coefficients at time `t`.
  """
  with ops.name_scope('interp_evaluate'):
    t0 = ops.convert_to_tensor(t0)
    t1 = ops.convert_to_tensor(t1)
    t = ops.convert_to_tensor(t)

    dtype = coefficients[0].dtype

    assert_op = control_flow_ops.Assert(
        (t0 <= t) & (t <= t1),
        ['invalid interpolation, fails `t0 <= t <= t1`:', t0, t, t1])
    with ops.control_dependencies([assert_op]):
      x = math_ops.cast((t - t0) / (t1 - t0), dtype)

    xs = [constant_op.constant(1, dtype), x]
    for _ in range(2, len(coefficients)):
      xs.append(xs[-1] * x)

    return _dot_product(coefficients, reversed(xs))
def availableGPUModes(self, opt, nptype):
  if (opt == image_ops.ResizeMethod.NEAREST_NEIGHBOR and
      nptype in [np.float32, np.float64]):
    return [True, False]
  else:
    return [False]
def testConvertBetweenFloat(self):
  # Make sure converting between float types does nothing interesting.
  with self.test_session():
    self._convert([-1.0, 0, 1.0, 200000], dtypes.float32, dtypes.float64,
                  [-1.0, 0, 1.0, 200000])
    self._convert([-1.0, 0, 1.0, 200000], dtypes.float64, dtypes.float32,
                  [-1.0, 0, 1.0, 200000])
def _compute_gradient(x,
                      x_shape,
                      dx,
                      y,
                      y_shape,
                      dy,
                      x_init_value=None,
                      delta=1e-3,
                      feed_dict=None,
                      prep_fn=None,
                      limit=0):
  """Computes the theoretical and numerical jacobian."""
  t = dtypes.as_dtype(x.dtype)
  allowed_types = [dtypes.float16, dtypes.float32, dtypes.float64,
                   dtypes.complex64, dtypes.complex128]
  assert t.base_dtype in allowed_types, "Don't support type %s for x" % t.name
  t2 = dtypes.as_dtype(y.dtype)
  assert t2.base_dtype in allowed_types, "Don't support type %s for y" % t2.name

  if x_init_value is not None:
    i_shape = list(x_init_value.shape)
    assert (list(x_shape) == i_shape), "x_shape = %s, init_data shape = %s" % (
        x_shape, i_shape)
    x_data = x_init_value
  else:
    if t == dtypes.float16:
      dtype = np.float16
    elif t == dtypes.float32:
      dtype = np.float32
    else:
      dtype = np.float64
    x_data = np.asfarray(np.random.random_sample(x_shape), dtype=dtype)

  print("\ttheoretical jacobian..")
  jacob_t = _compute_theoretical_jacobian(x, x_shape, x_data, dy, y_shape, dx,
                                          feed_dict, prep_fn=prep_fn)
  print("\tnumeric jacobian..")
  jacob_n = _compute_numeric_jacobian(x, x_shape, x_data, y, y_shape, delta,
                                      feed_dict, prep_fn=prep_fn, limit=limit)
  return jacob_t, jacob_n
def testZeroInitializer(self):
  for dtype in (dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64):
    for use_init in (False, True):
      self._testZeroInitializer(
          [10, 20], array_ops.ones([10, 20], dtype=dtype), use_init)
def test_normal_integral_mean_and_var_correctly_estimated(self):
  n = int(1e6)
  with self.test_session():
    mu_p = constant_op.constant([-1.0, 1.0], dtype=dtypes.float64)
    mu_q = constant_op.constant([0.0, 0.0], dtype=dtypes.float64)
    sigma_p = constant_op.constant([0.5, 0.5], dtype=dtypes.float64)
    sigma_q = constant_op.constant([1.0, 1.0], dtype=dtypes.float64)
    p = distributions.Normal(loc=mu_p, scale=sigma_p)
    q = distributions.Normal(loc=mu_q, scale=sigma_q)

    # Compute E_p[X].
    e_x = monte_carlo.expectation_importance_sampler(
        f=lambda x: x, log_p=p.log_prob, sampling_dist_q=q, n=n, seed=42)

    # Compute E_p[X^2].
    e_x2 = monte_carlo.expectation_importance_sampler(
        f=math_ops.square, log_p=p.log_prob, sampling_dist_q=q, n=n, seed=42)

    stddev = math_ops.sqrt(e_x2 - math_ops.square(e_x))

    # Relative tolerance (rtol) chosen 2 times as large as the minimum
    # needed to pass.
    # Convergence of mean is +- 0.003 if n = 100M.
    # Convergence of stddev is +- 0.00001 if n = 100M.
    self.assertEqual(p.get_batch_shape(), e_x.get_shape())
    self.assertAllClose(p.mean().eval(), e_x.eval(), rtol=0.01)
    self.assertAllClose(p.stddev().eval(), stddev.eval(), rtol=0.02)