The following code examples are drawn from open-source Python projects and illustrate how to use tensorflow.python.framework.dtypes.bool().
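As orientation before the examples: `dtypes.bool` is TensorFlow's boolean `DType`, and the snippets below mostly either compare a tensor's `base_dtype` against it or cast to and from it. A minimal sketch of that pattern, assuming the TF 1.x graph API (the era these examples come from); the tensor names are illustrative only:

# Minimal sketch (TF 1.x, illustrative only): compare a tensor's base
# dtype against dtypes.bool and cast before doing arithmetic on it.
import tensorflow as tf
from tensorflow.python.framework import dtypes

x = tf.constant([True, False, True])
if x.dtype.base_dtype == dtypes.bool:
  x = tf.cast(x, tf.float32)  # bool -> float32

with tf.Session() as sess:
  print(sess.run(x))  # [1. 0. 1.]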
def var(x, axis=None, keepdims=False):
  """Variance of a tensor, alongside the specified axis.

  Arguments:
      x: A tensor or variable.
      axis: An integer, the axis to compute the variance.
      keepdims: A boolean, whether to keep the dimensions or not.
          If `keepdims` is `False`, the rank of the tensor is reduced
          by 1. If `keepdims` is `True`, the reduced dimension is
          retained with length 1.

  Returns:
      A tensor with the variance of elements of `x`.
  """
  axis = _normalize_axis(axis, ndim(x))
  if x.dtype.base_dtype == dtypes_module.bool:
    x = math_ops.cast(x, floatx())
  m = math_ops.reduce_mean(x, reduction_indices=axis, keep_dims=True)
  devs_squared = math_ops.square(x - m)
  return math_ops.reduce_mean(
      devs_squared, reduction_indices=axis, keep_dims=keepdims)
def mean(x, axis=None, keepdims=False):
  """Mean of a tensor, alongside the specified axis.

  Arguments:
      x: A tensor or variable.
      axis: A list of integers. Axes to compute the mean.
      keepdims: A boolean, whether to keep the dimensions or not.
          If `keepdims` is `False`, the rank of the tensor is reduced
          by 1 for each entry in `axis`. If `keepdims` is `True`, the
          reduced dimensions are retained with length 1.

  Returns:
      A tensor with the mean of elements of `x`.
  """
  axis = _normalize_axis(axis, ndim(x))
  if x.dtype.base_dtype == dtypes_module.bool:
    x = math_ops.cast(x, floatx())
  return math_ops.reduce_mean(x, reduction_indices=axis, keep_dims=keepdims)
def in_top_k(predictions, targets, k):
  """Returns whether the `targets` are in the top `k` `predictions`.

  Arguments:
      predictions: A tensor of shape `(batch_size, classes)` and type
          `float32`.
      targets: A 1D tensor of length `batch_size` and type `int32` or
          `int64`.
      k: An `int`, number of top elements to consider.

  Returns:
      A 1D tensor of length `batch_size` and type `bool`. `output[i]` is
      `True` if `predictions[i, targets[i]]` is within the top-`k` values
      of `predictions[i]`.
  """
  return nn.in_top_k(predictions, targets, k)


# CONVOLUTIONS
def _make_test_csv_sparse():
  f = tempfile.NamedTemporaryFile(
      dir=tf.test.get_temp_dir(), delete=False, mode="w")
  w = csv.writer(f)
  w.writerow(["int", "float", "bool", "string"])
  for _ in range(100):
    # Leave columns empty; these will be read as the default value
    # (e.g. 0 or NaN).
    intvalue = np.random.randint(-10, 10) if np.random.rand() > 0.5 else ""
    floatvalue = np.random.rand() if np.random.rand() > 0.5 else ""
    boolvalue = int(np.random.rand() > 0.3) if np.random.rand() > 0.5 else ""
    stringvalue = (("S: %.4f" % np.random.rand())
                   if np.random.rand() > 0.5 else "")

    row = [intvalue, floatvalue, boolvalue, stringvalue]
    w.writerow(row)
  f.close()
  return f.name
def testTraversesControlInputs(self):
  dt1 = st.StochasticTensor(distributions.Normal(loc=0., scale=1.))
  logits = dt1.value() * 3.
  dt2 = st.StochasticTensor(distributions.Bernoulli(logits=logits))
  dt3 = st.StochasticTensor(distributions.Normal(loc=0., scale=1.))
  x = dt3.value()
  y = array_ops.ones((2, 2)) * 4.
  z = array_ops.ones((2, 2)) * 3.
  out = control_flow_ops.cond(
      math_ops.cast(dt2, dtypes.bool),
      lambda: math_ops.add(x, y),
      lambda: math_ops.square(z))
  out += 5.
  dep_map = sg._stochastic_dependencies_map([out])
  self.assertEqual(dep_map[dt1], set([out]))
  self.assertEqual(dep_map[dt2], set([out]))
  self.assertEqual(dep_map[dt3], set([out]))
def _make_test_csv_sparse():
  f = tempfile.NamedTemporaryFile(
      dir=test.get_temp_dir(), delete=False, mode="w")
  w = csv.writer(f)
  w.writerow(["int", "float", "bool", "string"])
  for _ in range(100):
    # Leave columns empty; these will be read as the default value
    # (e.g. 0 or NaN).
    intvalue = np.random.randint(-10, 10) if np.random.rand() > 0.5 else ""
    floatvalue = np.random.rand() if np.random.rand() > 0.5 else ""
    boolvalue = int(np.random.rand() > 0.3) if np.random.rand() > 0.5 else ""
    stringvalue = (("S: %.4f" % np.random.rand())
                   if np.random.rand() > 0.5 else "")

    row = [intvalue, floatvalue, boolvalue, stringvalue]
    w.writerow(row)
  f.close()
  return f.name
def testUnweighted(self):
  for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
    predictions = math_ops.cast(
        constant_op.constant(((1, 0, 1, 0),
                              (0, 1, 1, 1),
                              (0, 0, 0, 0))), dtype=dtype)
    labels = math_ops.cast(
        constant_op.constant(((0, 1, 1, 0),
                              (1, 0, 0, 0),
                              (0, 0, 0, 0))), dtype=dtype)
    tp, tp_update_op = metrics.streaming_true_positives(predictions, labels)

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertEqual(0, tp.eval())
      self.assertEqual(1, tp_update_op.eval())
      self.assertEqual(1, tp.eval())
def testWeighted(self):
  for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
    predictions = math_ops.cast(
        constant_op.constant(((1, 0, 1, 0),
                              (0, 1, 1, 1),
                              (0, 0, 0, 0))), dtype=dtype)
    labels = math_ops.cast(
        constant_op.constant(((0, 1, 1, 0),
                              (1, 0, 0, 0),
                              (0, 0, 0, 0))), dtype=dtype)
    tp, tp_update_op = metrics.streaming_true_positives(
        predictions, labels, weights=37.0)

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertEqual(0, tp.eval())
      self.assertEqual(37.0, tp_update_op.eval())
      self.assertEqual(37.0, tp.eval())
def testUnweighted(self):
  for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
    predictions = math_ops.cast(
        constant_op.constant(((1, 0, 1, 0),
                              (0, 1, 1, 1),
                              (0, 0, 0, 0))), dtype=dtype)
    labels = math_ops.cast(
        constant_op.constant(((0, 1, 1, 0),
                              (1, 0, 0, 0),
                              (0, 0, 0, 0))), dtype=dtype)
    fn, fn_update_op = metrics.streaming_false_negatives(predictions, labels)

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertEqual(0, fn.eval())
      self.assertEqual(2, fn_update_op.eval())
      self.assertEqual(2, fn.eval())
def testWeighted(self):
  for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
    predictions = math_ops.cast(
        constant_op.constant(((1, 0, 1, 0),
                              (0, 1, 1, 1),
                              (0, 0, 0, 0))), dtype=dtype)
    labels = math_ops.cast(
        constant_op.constant(((0, 1, 1, 0),
                              (1, 0, 0, 0),
                              (0, 0, 0, 0))), dtype=dtype)
    fn, fn_update_op = metrics.streaming_false_negatives(
        predictions, labels, weights=((3.0,), (5.0,), (7.0,)))

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertEqual(0, fn.eval())
      self.assertEqual(8.0, fn_update_op.eval())
      self.assertEqual(8.0, fn.eval())
def testUnweighted(self):
  for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
    predictions = math_ops.cast(
        constant_op.constant(((1, 0, 1, 0),
                              (0, 1, 1, 1),
                              (0, 0, 0, 0))), dtype=dtype)
    labels = math_ops.cast(
        constant_op.constant(((0, 1, 1, 0),
                              (1, 0, 0, 0),
                              (0, 0, 0, 0))), dtype=dtype)
    fp, fp_update_op = metrics.streaming_false_positives(predictions, labels)

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertEqual(0, fp.eval())
      self.assertEqual(4, fp_update_op.eval())
      self.assertEqual(4, fp.eval())
def testUnweighted(self):
  for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
    predictions = math_ops.cast(
        constant_op.constant(((1, 0, 1, 0),
                              (0, 1, 1, 1),
                              (0, 0, 0, 0))), dtype=dtype)
    labels = math_ops.cast(
        constant_op.constant(((0, 1, 1, 0),
                              (1, 0, 0, 0),
                              (0, 0, 0, 0))), dtype=dtype)
    tn, tn_update_op = metrics.streaming_true_negatives(predictions, labels)

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertEqual(0, tn.eval())
      self.assertEqual(5, tn_update_op.eval())
      self.assertEqual(5, tn.eval())
def testWeighted(self):
  for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
    predictions = math_ops.cast(
        constant_op.constant(((1, 0, 1, 0),
                              (0, 1, 1, 1),
                              (0, 0, 0, 0))), dtype=dtype)
    labels = math_ops.cast(
        constant_op.constant(((0, 1, 1, 0),
                              (1, 0, 0, 0),
                              (0, 0, 0, 0))), dtype=dtype)
    tn, tn_update_op = metrics.streaming_true_negatives(
        predictions, labels, weights=((0.0, 2.0, 3.0, 5.0),))

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertEqual(0, tn.eval())
      self.assertEqual(15.0, tn_update_op.eval())
      self.assertEqual(15.0, tn.eval())
def __init__(self, inputs, sequence_length, time_major=False, name=None):
  """Initializer.

  Args:
    inputs: A (structure of) input tensors.
    sequence_length: An int32 vector tensor.
    time_major: Python bool. Whether the tensors in `inputs` are time
      major. If `False` (default), they are assumed to be batch major.
    name: Name scope for any created operations.

  Raises:
    ValueError: if `sequence_length` is not a 1D tensor.
  """
  with ops.name_scope(name, "TrainingHelper", [inputs, sequence_length]):
    inputs = ops.convert_to_tensor(inputs, name="inputs")
    if not time_major:
      inputs = nest.map_structure(_transpose_batch_time, inputs)
    self._input_tas = nest.map_structure(_unstack_ta, inputs)
    self._sequence_length = ops.convert_to_tensor(
        sequence_length, name="sequence_length")
    if self._sequence_length.get_shape().ndims != 1:
      raise ValueError(
          "Expected sequence_length to be a vector, but received shape: %s"
          % self._sequence_length.get_shape())
    self._zero_inputs = nest.map_structure(
        lambda inp: array_ops.zeros_like(inp[0, :]), inputs)
    self._batch_size = array_ops.size(sequence_length)
def __init__(self, inputs, sequence_length, embedding, sampling_probability,
             time_major=False, seed=None, scheduling_seed=None, name=None):
  """Initializer.

  Args:
    inputs: A (structure of) input tensors.
    sequence_length: An int32 vector tensor.
    embedding: A callable that takes a vector tensor of `ids` (argmax ids),
      or the `params` argument for `embedding_lookup`.
    sampling_probability: A 0D `float32` tensor: the probability of
      sampling categorically from the output ids instead of reading
      directly from the inputs.
    time_major: Python bool. Whether the tensors in `inputs` are time
      major. If `False` (default), they are assumed to be batch major.
    seed: The sampling seed.
    scheduling_seed: The schedule decision rule sampling seed.
    name: Name scope for any created operations.

  Raises:
    ValueError: if `sampling_probability` is not a scalar or vector.
  """
  with ops.name_scope(name, "ScheduledEmbeddingSamplingWrapper",
                      [embedding, sampling_probability]):
    if callable(embedding):
      self._embedding_fn = embedding
    else:
      self._embedding_fn = (
          lambda ids: embedding_ops.embedding_lookup(embedding, ids))
    self._sampling_probability = ops.convert_to_tensor(
        sampling_probability, name="sampling_probability")
    if self._sampling_probability.get_shape().ndims not in (0, 1):
      raise ValueError(
          "sampling_probability must be either a scalar or a vector. "
          "saw shape: %s" % (self._sampling_probability.get_shape()))
    self._seed = seed
    self._scheduling_seed = scheduling_seed
    super(ScheduledEmbeddingTrainingHelper, self).__init__(
        inputs=inputs,
        sequence_length=sequence_length,
        time_major=time_major,
        name=name)
def sample(self, time, outputs, state, name=None):
  with ops.name_scope(name, "ScheduledOutputTrainingHelperSample",
                      [time, outputs, state]):
    sampler = bernoulli.Bernoulli(probs=self._sampling_probability)
    return math_ops.cast(
        sampler.sample(sample_shape=self.batch_size, seed=self._seed),
        dtypes.bool)
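The `sample` method above draws per-example Bernoulli coin flips and casts them to `dtypes.bool` so they can drive boolean selection downstream. A standalone sketch of the same cast, assuming TF 1.x with `tf.distributions.Bernoulli` available (in earlier releases the class lived in `tf.contrib.distributions`):

# Illustrative sketch: Bernoulli samples come back as integers, so they
# are cast to bool before being used as a selection mask.
import tensorflow as tf

sampler = tf.distributions.Bernoulli(probs=0.25)
draws = tf.cast(sampler.sample(sample_shape=[4]), tf.bool)

with tf.Session() as sess:
  print(sess.run(draws))  # e.g. [False  True False False]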
def hyper_parameters():
  py_all = all
  py_sum = sum

  # INTERNAL UTILS

  # This is the default internal TF session used by Keras.
  # It can be set manually via `set_session(sess)`.
  _SESSION = None

  # This dictionary holds a mapping {graph: learning_phase}. A learning
  # phase is a bool tensor used to run Keras models in either train mode
  # (learning_phase == 1) or test mode (learning_phase == 0).
  _GRAPH_LEARNING_PHASES = {}

  # This dictionary holds a mapping {graph: UID_DICT}. Each UID_DICT is a
  # dictionary mapping name prefixes to a current index, used for
  # generating graph-specific string UIDs for various names
  # (e.g. layer names).
  _GRAPH_UID_DICTS = {}

  # This boolean flag can be set to True to leave variable initialization
  # up to the user. Change its value via
  # `manual_variable_initialization(value)`.
  _MANUAL_VAR_INIT = False

  # The type of float to use throughout a session.
  _FLOATX = 'float32'

  # Epsilon fuzz factor used throughout the codebase.
  _EPSILON = 10e-8

  # Default image data format, one of "channels_last", "channels_first".
  _IMAGE_DATA_FORMAT = 'channels_last'
def clear_session():
  """Destroys the current TF graph and creates a new one.

  Useful to avoid clutter from old models / layers.
  """
  global _SESSION
  global _GRAPH_LEARNING_PHASES  # pylint: disable=global-variable-not-assigned
  ops.reset_default_graph()
  reset_uids()
  _SESSION = None
  phase = array_ops.placeholder(dtype='bool', name='keras_learning_phase')
  _GRAPH_LEARNING_PHASES = {}
  _GRAPH_LEARNING_PHASES[ops.get_default_graph()] = phase
def learning_phase():
  """Returns the learning phase flag.

  The learning phase flag is a bool tensor (0 = test, 1 = train) to be
  passed as input to any Keras function that uses a different behavior at
  train time and test time.

  Returns:
      Learning phase (scalar integer tensor or Python integer).
  """
  graph = ops.get_default_graph()
  if graph not in _GRAPH_LEARNING_PHASES:
    phase = array_ops.placeholder(dtype='bool', name='keras_learning_phase')
    _GRAPH_LEARNING_PHASES[graph] = phase
  return _GRAPH_LEARNING_PHASES[graph]
def all(x, axis=None, keepdims=False):
  """Logical AND reduction.

  Arguments:
      x: Tensor or variable.
      axis: axis along which to perform the reduction.
      keepdims: whether to retain the reduced axes with length 1 or drop
          them.

  Returns:
      A bool tensor.
  """
  axis = _normalize_axis(axis, ndim(x))
  x = math_ops.cast(x, dtypes_module.bool)
  return math_ops.reduce_all(x, reduction_indices=axis, keep_dims=keepdims)
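A quick check of the behavior above, assuming plain TF 1.x: nonzero values cast to `True`, so the reduction acts as a logical AND along the requested axis.

# Illustrative check: casting to bool maps nonzero -> True, and
# reduce_all then ANDs along axis 1.
import tensorflow as tf

x = tf.constant([[1, 2, 0], [3, 4, 5]])
per_row = tf.reduce_all(tf.cast(x, tf.bool), axis=1)

with tf.Session() as sess:
  print(sess.run(per_row))  # [False  True]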
def equal(x, y):
  """Element-wise equality between two tensors.

  Arguments:
      x: Tensor or variable.
      y: Tensor or variable.

  Returns:
      A bool tensor.
  """
  return math_ops.equal(x, y)
def not_equal(x, y):
  """Element-wise inequality between two tensors.

  Arguments:
      x: Tensor or variable.
      y: Tensor or variable.

  Returns:
      A bool tensor.
  """
  return math_ops.not_equal(x, y)
def greater(x, y):
  """Element-wise truth value of (x > y).

  Arguments:
      x: Tensor or variable.
      y: Tensor or variable.

  Returns:
      A bool tensor.
  """
  return math_ops.greater(x, y)
def greater_equal(x, y):
  """Element-wise truth value of (x >= y).

  Arguments:
      x: Tensor or variable.
      y: Tensor or variable.

  Returns:
      A bool tensor.
  """
  return math_ops.greater_equal(x, y)
def less_equal(x, y):
  """Element-wise truth value of (x <= y).

  Arguments:
      x: Tensor or variable.
      y: Tensor or variable.

  Returns:
      A bool tensor.
  """
  return math_ops.less_equal(x, y)
def switch(condition, then_expression, else_expression):
  """Switches between two operations depending on a scalar value.

  Note that both `then_expression` and `else_expression`
  should be symbolic tensors of the *same shape*.

  Arguments:
      condition: scalar tensor (`int` or `bool`).
      then_expression: either a tensor, or a callable that returns a
          tensor.
      else_expression: either a tensor, or a callable that returns a
          tensor.

  Returns:
      The selected tensor.
  """
  if condition.dtype != dtypes_module.bool:
    condition = math_ops.cast(condition, 'bool')
  if not callable(then_expression):

    def then_expression_fn():
      return then_expression
  else:
    then_expression_fn = then_expression
  if not callable(else_expression):

    def else_expression_fn():
      return else_expression
  else:
    else_expression_fn = else_expression
  x = control_flow_ops.cond(condition, then_expression_fn,
                            else_expression_fn)
  return x
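`switch` ultimately lowers to `control_flow_ops.cond`, which is `tf.cond` in the public API. An illustrative TF 1.x usage of that underlying primitive with a scalar bool placeholder (names here are made up for the sketch):

# Select one of two branches from a scalar bool fed at run time.
import tensorflow as tf

condition = tf.placeholder(tf.bool, shape=(), name="use_then_branch")
out = tf.cond(condition,
              lambda: tf.constant(1.0),   # then branch
              lambda: tf.constant(-1.0))  # else branch

with tf.Session() as sess:
  print(sess.run(out, feed_dict={condition: True}))   # 1.0
  print(sess.run(out, feed_dict={condition: False}))  # -1.0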
def on_train_begin(self, logs=None):
  if self.append:
    if os.path.exists(self.filename):
      with open(self.filename, 'r' + self.file_flags) as f:
        self.append_header = not bool(len(f.readline()))
    self.csv_file = open(self.filename, 'a' + self.file_flags)
  else:
    self.csv_file = open(self.filename, 'w' + self.file_flags)
def _make_test_csv():
  f = tempfile.NamedTemporaryFile(
      dir=tf.test.get_temp_dir(), delete=False, mode="w")
  w = csv.writer(f)
  w.writerow(["int", "float", "bool", "string"])
  for _ in range(100):
    intvalue = np.random.randint(-10, 10)
    floatvalue = np.random.rand()
    boolvalue = int(np.random.rand() > 0.3)
    stringvalue = "S: %.4f" % np.random.rand()

    row = [intvalue, floatvalue, boolvalue, stringvalue]
    w.writerow(row)
  f.close()
  return f.name
def testFromCSVWithFeatureSpec(self):
  if not HAS_PANDAS:
    return
  num_batches = 100
  batch_size = 8

  data_path = _make_test_csv_sparse()
  feature_spec = {
      "int": tf.FixedLenFeature(None, dtypes.int16, np.nan),
      "float": tf.VarLenFeature(dtypes.float16),
      "bool": tf.VarLenFeature(dtypes.bool),
      "string": tf.FixedLenFeature(None, dtypes.string, "")
  }

  pandas_df = pd.read_csv(data_path, dtype={"string": object})
  # Pandas insanely uses NaN for empty cells in a string column.
  # And, we can't use Pandas replace() to fix them because nan != nan.
  s = pandas_df["string"]
  for i in range(0, len(s)):
    if isinstance(s[i], float) and math.isnan(s[i]):
      pandas_df.set_value(i, "string", "")
  tensorflow_df = df.TensorFlowDataFrame.from_csv_with_feature_spec(
      [data_path],
      batch_size=batch_size,
      shuffle=False,
      feature_spec=feature_spec)

  # These columns were sparse; re-densify them for comparison.
  tensorflow_df["float"] = densify.Densify(np.nan)(tensorflow_df["float"])
  tensorflow_df["bool"] = densify.Densify(np.nan)(tensorflow_df["bool"])

  self._assert_pandas_equals_tensorflow(
      pandas_df, tensorflow_df,
      num_batches=num_batches,
      batch_size=batch_size)
def sparse_boolean_mask(sparse_tensor, mask, name="sparse_boolean_mask"):
  """Boolean mask for `SparseTensor`s.

  Args:
    sparse_tensor: a `SparseTensor`.
    mask: a 1D boolean dense `Tensor` whose length is equal to the 0th
      dimension of `sparse_tensor`.
    name: optional name for this operation.

  Returns:
    A `SparseTensor` that contains row `k` of `sparse_tensor` iff `mask[k]`
    is `True`.
  """
  # TODO(jamieas): consider mask dimension > 1 for symmetry with
  # `boolean_mask`.
  with ops.name_scope(name, values=[sparse_tensor, mask]):
    mask = ops.convert_to_tensor(mask)
    mask_rows = array_ops.where(mask)
    first_indices = array_ops.squeeze(
        array_ops.slice(sparse_tensor.indices, [0, 0], [-1, 1]))

    # Identify indices corresponding to the rows identified by mask_rows.
    sparse_entry_matches = functional_ops.map_fn(
        lambda x: math_ops.equal(first_indices, x),
        mask_rows,
        dtype=dtypes.bool)

    # Combine the rows of index_matches to form a mask for the sparse
    # indices and values.
    to_retain = array_ops.reshape(
        functional_ops.foldl(math_ops.logical_or, sparse_entry_matches),
        [-1])

    return sparse_ops.sparse_retain(sparse_tensor, to_retain)
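A hypothetical usage of `sparse_boolean_mask` above, assuming the function and a TF 1.x session are in scope; `sparse_retain` keeps the dense shape but drops the entries of masked-out rows:

import tensorflow as tf

sp = tf.SparseTensor(indices=[[0, 0], [1, 1], [2, 0]],
                     values=[10, 20, 30],
                     dense_shape=[3, 2])
mask = tf.constant([True, False, True])  # drop row 1
kept = sparse_boolean_mask(sp, mask)

with tf.Session() as sess:
  print(sess.run(kept.values))  # [10 30]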
def summarize_tensor(tensor, tag=None):
  """Summarize a tensor using a suitable summary type.

  This function adds a summary op for `tensor`. The type of summary depends
  on the shape of `tensor`. For scalars, a `scalar_summary` is created; for
  all other tensors, `histogram_summary` is used.

  Args:
    tensor: The tensor to summarize.
    tag: The tag to use; if None, use the tensor's op's name.

  Returns:
    The summary op created, or None for string tensors.
  """
  # Skips string tensors and boolean tensors (not handled by the
  # summaries).
  if (tensor.dtype.is_compatible_with(dtypes.string) or
      tensor.dtype.base_dtype == dtypes.bool):
    return None

  if tensor.get_shape().ndims == 0:
    # For scalars, use a scalar summary.
    return _add_scalar_summary(tensor, tag)
  else:
    # We may land in here if the rank is still unknown. The histogram won't
    # hurt if this ends up being a scalar.
    return _add_histogram_summary(tensor, tag)
def _mask_weights(mask=None, weights=None):
  """Mask a given set of weights.

  Elements are included when the corresponding `mask` element is `False`,
  and excluded otherwise.

  Args:
    mask: An optional, `bool` `Tensor`.
    weights: An optional `Tensor` whose shape matches `mask` if `mask` is
      not `None`.

  Returns:
    Masked weights if `mask` and `weights` are not `None`, weights
    equivalent to `mask` if `weights` is `None`, and otherwise `weights`.

  Raises:
    ValueError: If `weights` and `mask` are not `None` and have mismatched
      shapes.
  """
  if mask is not None:
    check_ops.assert_type(mask, dtypes.bool)
    if weights is None:
      weights = array_ops.ones_like(mask, dtype=dtypes.float32)
    weights = math_ops.cast(math_ops.logical_not(mask),
                            weights.dtype) * weights

  return weights
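Note the inverted semantics: `True` in `mask` means *excluded*. A quick hypothetical check, assuming `_mask_weights` is in scope under TF 1.x:

import tensorflow as tf

mask = tf.constant([True, False, False, True])
# With no weights given, the helper defaults to ones, then zeroes out
# the True positions of the mask.
weights = _mask_weights(mask=mask)

with tf.Session() as sess:
  print(sess.run(weights))  # [0. 1. 1. 0.]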
def _count_condition(values, weights=None, metrics_collections=None,
                     updates_collections=None):
  """Sums the weights of cases where the given values are True.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    values: A `bool` `Tensor` of arbitrary size.
    weights: An optional `Tensor` whose shape is broadcastable to `values`.
    metrics_collections: An optional list of collections that the metric
      value variable should be added to.
    updates_collections: An optional list of collections that the metric
      update ops should be added to.

  Returns:
    value_tensor: A tensor representing the current value of the metric.
    update_op: An operation that accumulates the error from a batch of
      data.

  Raises:
    ValueError: If `weights` is not `None` and its shape doesn't match
      `values`, or if either `metrics_collections` or `updates_collections`
      are not a list or tuple.
  """
  check_ops.assert_type(values, dtypes.bool)
  count = _create_local('count', shape=[])

  values = math_ops.to_float(values)
  if weights is not None:
    weights = math_ops.to_float(weights)
    values = math_ops.mul(values, weights)

  value_tensor = array_ops.identity(count)
  update_op = state_ops.assign_add(count, math_ops.reduce_sum(values))

  if metrics_collections:
    ops.add_to_collections(metrics_collections, value_tensor)

  if updates_collections:
    ops.add_to_collections(updates_collections, update_op)

  return value_tensor, update_op
def _streaming_true_positives(predictions, labels, weights=None,
                              metrics_collections=None,
                              updates_collections=None,
                              name=None):
  """Sum the weights of true_positives.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    predictions: The predicted values, a `bool` `Tensor` of arbitrary
      dimensions.
    labels: The ground truth values, a `bool` `Tensor` whose dimensions
      must match `predictions`.
    weights: An optional `Tensor` whose shape is broadcastable to
      `predictions`.
    metrics_collections: An optional list of collections that the metric
      value variable should be added to.
    updates_collections: An optional list of collections that the metric
      update ops should be added to.
    name: An optional variable_scope name.

  Returns:
    value_tensor: A tensor representing the current value of the metric.
    update_op: An operation that accumulates the error from a batch of
      data.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or
      if either `metrics_collections` or `updates_collections` are not a
      list or tuple.
  """
  with variable_scope.variable_scope(
      name, 'true_positives', [predictions, labels]):

    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    is_true_positive = math_ops.logical_and(math_ops.equal(labels, 1),
                                            math_ops.equal(predictions, 1))
    return _count_condition(is_true_positive, weights, metrics_collections,
                            updates_collections)
def _streaming_false_positives(predictions, labels, weights=None,
                               metrics_collections=None,
                               updates_collections=None,
                               name=None):
  """Sum the weights of false positives.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    predictions: The predicted values, a `bool` `Tensor` of arbitrary
      dimensions.
    labels: The ground truth values, a `bool` `Tensor` whose dimensions
      must match `predictions`.
    weights: An optional `Tensor` whose shape is broadcastable to
      `predictions`.
    metrics_collections: An optional list of collections that the metric
      value variable should be added to.
    updates_collections: An optional list of collections that the metric
      update ops should be added to.
    name: An optional variable_scope name.

  Returns:
    value_tensor: A tensor representing the current value of the metric.
    update_op: An operation that accumulates the error from a batch of
      data.

  Raises:
    ValueError: If `predictions` and `labels` have mismatched shapes, or if
      `weights` is not `None` and its shape doesn't match `predictions`, or
      if either `metrics_collections` or `updates_collections` are not a
      list or tuple.
  """
  with variable_scope.variable_scope(
      name, 'false_positives', [predictions, labels]):

    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    is_false_positive = math_ops.logical_and(math_ops.equal(labels, 0),
                                             math_ops.equal(predictions, 1))
    return _count_condition(is_false_positive, weights, metrics_collections,
                            updates_collections)
def _check_labels_and_scores(boolean_labels, scores, check_shape):
  """Check the rank of labels/scores, return tensor versions."""
  with ops.name_scope('_check_labels_and_scores',
                      values=[boolean_labels, scores]):
    boolean_labels = ops.convert_to_tensor(boolean_labels,
                                           name='boolean_labels')
    scores = ops.convert_to_tensor(scores, name='scores')

    if boolean_labels.dtype != dtypes.bool:
      raise ValueError(
          'Argument boolean_labels should have dtype bool. Found: %s'
          % boolean_labels.dtype)

    if check_shape:
      labels_rank_1 = control_flow_ops.Assert(
          math_ops.equal(1, array_ops.rank(boolean_labels)),
          ['Argument boolean_labels should have rank 1. Found: ',
           boolean_labels.name, array_ops.shape(boolean_labels)])

      scores_rank_1 = control_flow_ops.Assert(
          math_ops.equal(1, array_ops.rank(scores)),
          ['Argument scores should have rank 1. Found: ', scores.name,
           array_ops.shape(scores)])

      with ops.control_dependencies([labels_rank_1, scores_rank_1]):
        return boolean_labels, scores
    else:
      return boolean_labels, scores
def accuracy(predictions, labels, weights=None):
  """Computes the percentage of times that predictions match labels.

  Args:
    predictions: the predicted values, a `Tensor` whose dtype and shape
      matches 'labels'.
    labels: the ground truth values, a `Tensor` of any shape and
      bool, integer, or string dtype.
    weights: None or `Tensor` of float values to reweight the accuracy.

  Returns:
    Accuracy `Tensor`.

  Raises:
    ValueError: if dtypes don't match or if dtype is not bool, integer, or
      string.
  """
  if not (labels.dtype.is_integer or
          labels.dtype in (dtypes.bool, dtypes.string)):
    raise ValueError(
        'Labels should have bool, integer, or string dtype, not %r' %
        labels.dtype)
  if not labels.dtype.is_compatible_with(predictions.dtype):
    raise ValueError('Dtypes of predictions and labels should match. '
                     'Given: predictions (%r) and labels (%r)' %
                     (predictions.dtype, labels.dtype))
  with ops.name_scope('accuracy', values=[predictions, labels]):
    is_correct = math_ops.cast(
        math_ops.equal(predictions, labels), dtypes.float32)
    if weights is not None:
      is_correct = math_ops.mul(is_correct, weights)
    return math_ops.reduce_mean(is_correct)
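A hypothetical usage of `accuracy` above, assuming it is in scope under TF 1.x; three of the four predictions match, so the result is 0.75:

import tensorflow as tf

predictions = tf.constant([1, 0, 1, 1])
labels = tf.constant([1, 0, 0, 1])
acc = accuracy(predictions, labels)

with tf.Session() as sess:
  print(sess.run(acc))  # 0.75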
def _dtype_to_nan(dtype):
  if dtype is dtypes.string:
    return b""
  elif dtype.is_integer:
    return np.nan
  elif dtype.is_floating:
    return np.nan
  elif dtype is dtypes.bool:
    return np.nan
  else:
    raise ValueError("Can't parse type without NaN into sparse tensor: %s"
                     % dtype)
def _streaming_false_negatives(predictions, labels, weights=None,
                               metrics_collections=None,
                               updates_collections=None,
                               name=None):
  """Computes the total number of false negatives.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    predictions: The predicted values, a `bool` `Tensor` of arbitrary
      dimensions.
    labels: The ground truth values, a `bool` `Tensor` whose dimensions
      must match `predictions`.
    weights: An optional `Tensor` whose shape is broadcastable to
      `predictions`.
    metrics_collections: An optional list of collections that the metric
      value variable should be added to.
    updates_collections: An optional list of collections that the metric
      update ops should be added to.
    name: An optional variable_scope name.

  Returns:
    value_tensor: A tensor representing the current value of the metric.
    update_op: An operation that accumulates the error from a batch of
      data.

  Raises:
    ValueError: If `weights` is not `None` and its shape doesn't match
      `values`, or if either `metrics_collections` or `updates_collections`
      are not a list or tuple.
  """
  with variable_scope.variable_scope(
      name, 'false_negatives', [predictions, labels]):

    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    is_false_negative = math_ops.logical_and(math_ops.equal(labels, 1),
                                             math_ops.equal(predictions, 0))
    return _count_condition(is_false_negative, weights, metrics_collections,
                            updates_collections)