The following 50 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.python.ops.math_ops.to_float().
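For context before the examples: to_float(x) is the TF 1.x shorthand for tf.cast(x, tf.float32). A minimal sketch of that behavior (assumes the session-based TF 1.x runtime; the variable names are illustrative only):

import tensorflow as tf
from tensorflow.python.ops import math_ops

labels = tf.constant([0, 1, 1, 0])        # an int32 tensor
float_labels = math_ops.to_float(labels)  # same as tf.cast(labels, tf.float32)

with tf.Session() as sess:
  print(sess.run(float_labels))  # [0. 1. 1. 0.]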
def _lengths_to_masks(lengths, max_length):
  """Creates a binary matrix that can be used to mask away padding.

  Args:
    lengths: A vector of integers representing lengths.
    max_length: An integer indicating the maximum length. All values in
      lengths should be less than max_length.

  Returns:
    masks: Masks that can be used to get rid of padding.
  """
  tiled_ranges = array_ops.tile(
      array_ops.expand_dims(math_ops.range(max_length), 0),
      [array_ops.shape(lengths)[0], 1])
  lengths = array_ops.expand_dims(lengths, 1)
  masks = math_ops.to_float(
      math_ops.to_int64(tiled_ranges) < math_ops.to_int64(lengths))
  return masks
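A quick way to see what _lengths_to_masks produces, with hypothetical inputs (assumes the listing's TF 1.x imports are in scope):

# lengths [2, 3] with max_length 4 should yield
# [[1., 1., 0., 0.],
#  [1., 1., 1., 0.]]
masks = _lengths_to_masks(tf.constant([2, 3]), 4)
with tf.Session() as sess:
  print(sess.run(masks))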
def insert_transformed_feature(self, columns_to_tensors):
  """Applies the transformation and inserts it into columns_to_tensors.

  Args:
    columns_to_tensors: A mapping from feature columns to tensors. A 'string'
      key means a base (not-transformed) feature. It can have a
      _FeatureColumn as a key too, meaning that _FeatureColumn is already
      transformed.
  """
  # Transform the input tensor according to the normalizer function + reshape.
  input_tensor = self._normalized_input_tensor(columns_to_tensors[self.name])
  batch_size = input_tensor.get_shape().as_list()[0]
  batch_size = int(batch_size) if batch_size else -1
  flattened_shape = [batch_size, self.dimension]
  columns_to_tensors[self] = array_ops.reshape(
      math_ops.to_float(input_tensor), flattened_shape, name="reshape")

# pylint: disable=unused-argument
def loss(self, data, labels):
  """The loss to minimize while training."""
  if self.is_regression:
    diff = self.training_inference_graph(data) - math_ops.to_float(labels)
    mean_squared_error = math_ops.reduce_mean(diff * diff)
    root_mean_squared_error = math_ops.sqrt(mean_squared_error, name="loss")
    loss = root_mean_squared_error
  else:
    loss = math_ops.reduce_mean(
        nn_ops.sparse_softmax_cross_entropy_with_logits(
            self.training_inference_graph(data),
            array_ops.squeeze(math_ops.to_int32(labels))),
        name="loss")
  if self.regularizer:
    loss += layers.apply_regularization(self.regularizer,
                                        variables.trainable_variables())
  return loss
def hinge_loss(logits, labels=None, scope=None, target=None):
  """Method that returns the loss tensor for hinge loss.

  Args:
    logits: The logits, a float tensor.
    labels: The ground truth output tensor. Its shape should match the shape
      of logits. The values of the tensor are expected to be 0.0 or 1.0.
    scope: The scope for the operations performed in computing the loss.
    target: Deprecated alias for `labels`.

  Returns:
    A `Tensor` of same shape as logits and target representing the loss values
      across the batch.

  Raises:
    ValueError: If the shapes of `logits` and `labels` don't match.
  """
  labels = _labels(labels, target)
  with ops.name_scope(scope, "hinge_loss", [logits, labels]) as scope:
    logits.get_shape().assert_is_compatible_with(labels.get_shape())
    # We first need to convert binary labels to -1/1 labels (as floats).
    labels = math_ops.to_float(labels)
    all_ones = array_ops.ones_like(labels)
    labels = math_ops.sub(2 * labels, all_ones)
    return nn_ops.relu(math_ops.sub(all_ones, math_ops.mul(labels, logits)))
def _get_loss(self, features, labels, data_spec=None):
  """Constructs, caches, and returns the inference-based loss."""
  if self._loss is not None:
    return self._loss

  def _average_loss():
    probs = self.inference_graph(features, data_spec=data_spec)
    return math_ops.reduce_sum(self.loss_fn(
        probs, labels)) / math_ops.to_float(
            array_ops.shape(features)[0])

  self._loss = control_flow_ops.cond(
      self.average_size() > 0, _average_loss,
      lambda: constant_op.constant(sys.maxsize, dtype=dtypes.float32))
  return self._loss
def PearsonCorrelationTF(x, y, prefix='pearson'):
  '''Create a TF network that calculates the Pearson Correlation on two input
  vectors. Returns a scalar tensor with the correlation [-1:1].'''
  with tf.name_scope(prefix):
    n = tf.to_float(tf.shape(x)[0])
    x_sum = tf.reduce_sum(x)
    y_sum = tf.reduce_sum(y)
    xy_sum = tf.reduce_sum(tf.multiply(x, y))
    x2_sum = tf.reduce_sum(tf.multiply(x, x))
    y2_sum = tf.reduce_sum(tf.multiply(y, y))
    r_num = tf.subtract(tf.multiply(n, xy_sum), tf.multiply(x_sum, y_sum))
    r_den_x = tf.sqrt(tf.subtract(tf.multiply(n, x2_sum),
                                  tf.multiply(x_sum, x_sum)))
    r_den_y = tf.sqrt(tf.subtract(tf.multiply(n, y2_sum),
                                  tf.multiply(y_sum, y_sum)))
    r = tf.div(r_num, tf.multiply(r_den_x, r_den_y), name='r')
  return r
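Since the function implements the single-pass form r = (n*Σxy - Σx*Σy) / sqrt((n*Σx² - (Σx)²)(n*Σy² - (Σy)²)), a quick sanity check against NumPy with hypothetical values:

import numpy as np

x_vals = [1.0, 2.0, 3.0, 4.0]
y_vals = [1.1, 1.9, 3.2, 3.8]
r = PearsonCorrelationTF(tf.constant(x_vals), tf.constant(y_vals))
with tf.Session() as sess:
  print(sess.run(r))                      # TF result
print(np.corrcoef(x_vals, y_vals)[0, 1])  # should agree closely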
def hinge_loss(logits, labels=None, scope=None):
  """Method that returns the loss tensor for hinge loss.

  Args:
    logits: The logits, a float tensor.
    labels: The ground truth output tensor. Its shape should match the shape
      of logits. The values of the tensor are expected to be 0.0 or 1.0.
    scope: The scope for the operations performed in computing the loss.

  Returns:
    A `Tensor` of same shape as `logits` and `labels` representing the loss
      values across the batch.

  Raises:
    ValueError: If the shapes of `logits` and `labels` don't match.
  """
  with ops.name_scope(scope, "hinge_loss", [logits, labels]) as scope:
    logits.get_shape().assert_is_compatible_with(labels.get_shape())
    # We first need to convert binary labels to -1/1 labels (as floats).
    labels = math_ops.to_float(labels)
    all_ones = array_ops.ones_like(labels)
    labels = math_ops.subtract(2 * labels, all_ones)
    return nn_ops.relu(
        math_ops.subtract(all_ones, math_ops.multiply(labels, logits)))
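The conversion from {0, 1} labels to {-1, +1} is the heart of both hinge-loss variants above: the loss is max(0, 1 - y * logit). A worked check with hypothetical values: for label 0 and logit 0.5, y = -1 and the loss is max(0, 1 + 0.5) = 1.5; for label 1 and logit 0.5 it is max(0, 1 - 0.5) = 0.5:

logits = tf.constant([0.5, 0.5])
labels = tf.constant([0.0, 1.0])
per_example = hinge_loss(logits, labels)  # element-wise, not reduced
with tf.Session() as sess:
  print(sess.run(per_example))  # [1.5 0.5]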
def testVars(self):
  metrics.streaming_pearson_correlation(
      predictions=math_ops.to_float(math_ops.range(10)) + array_ops.ones(
          [10, 10]),
      labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]))
  _assert_local_variables(self, (
      'pearson_r/covariance/comoment:0',
      'pearson_r/covariance/count:0',
      'pearson_r/covariance/mean_label:0',
      'pearson_r/covariance/mean_prediction:0',
      'pearson_r/variance_labels/count:0',
      'pearson_r/variance_labels/comoment:0',
      'pearson_r/variance_labels/mean_label:0',
      'pearson_r/variance_labels/mean_prediction:0',
      'pearson_r/variance_predictions/comoment:0',
      'pearson_r/variance_predictions/count:0',
      'pearson_r/variance_predictions/mean_label:0',
      'pearson_r/variance_predictions/mean_prediction:0',))
def _covariance(x, diag):
  """Defines the covariance operation of a matrix.

  Args:
    x: a matrix Tensor. Dimension 0 should contain the number of examples.
    diag: if True, it computes the diagonal covariance.

  Returns:
    A Tensor representing the covariance of x. In the case of
    diagonal matrix just the diagonal is returned.
  """
  num_points = math_ops.to_float(array_ops.shape(x)[0])
  x -= math_ops.reduce_mean(x, 0, keep_dims=True)
  if diag:
    cov = math_ops.reduce_sum(
        math_ops.square(x), 0, keep_dims=True) / (num_points - 1)
  else:
    cov = math_ops.matmul(x, x, transpose_a=True) / (num_points - 1)
  return cov
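Note the (num_points - 1) divisor: this is the sample covariance with Bessel's correction, so the diag=False branch should agree with NumPy's np.cov(data, rowvar=False). A hypothetical check (assumes the listing's imports are in scope):

data = tf.constant([[1.0, 2.0], [3.0, 5.0], [5.0, 4.0]])
cov = _covariance(data, diag=False)
with tf.Session() as sess:
  print(sess.run(cov))  # compare against np.cov(..., rowvar=False), ddof=1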
def _define_diag_covariance_probs(self, shard_id, shard):
  """Defines the diagonal covariance probabilities per example in a class.

  Args:
    shard_id: id of the current shard.
    shard: current data shard, 1 X num_examples X dimensions.

  Returns a matrix num_examples * num_classes.
  """
  # num_classes X 1
  # TODO(xavigonzalvo): look into alternatives to log for
  # reparametrization of variance parameters.
  det_expanded = math_ops.reduce_sum(
      math_ops.log(self._covs + 1e-3), 1, keep_dims=True)
  diff = shard - self._means
  x2 = math_ops.square(diff)
  cov_expanded = array_ops.expand_dims(1.0 / (self._covs + 1e-3), 2)
  # num_classes X num_examples
  x2_cov = math_ops.matmul(x2, cov_expanded)
  x2_cov = array_ops.transpose(array_ops.squeeze(x2_cov, [2]))
  self._probs[shard_id] = -0.5 * (
      math_ops.to_float(self._dimensions) * math_ops.log(2.0 * np.pi) +
      array_ops.transpose(det_expanded) + x2_cov)
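As a reading of the code (not stated in the source), the expression assembled here is the per-class log-density of a diagonal Gaussian, log p(x) = -0.5 * (d * log(2*pi) + sum_j log(sigma_j^2) + sum_j (x_j - mu_j)^2 / sigma_j^2), where d is self._dimensions, the sigma_j^2 terms correspond to self._covs (with the 1e-3 floor guarding against log(0) and division by zero), and mu to self._means.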
def compute_weighted_loss(losses, weight=1.0):
  """Computes the weighted loss.

  Args:
    losses: A tensor of size [batch_size, d1, ... dN].
    weight: A tensor of size [1] or [batch_size, d1, ... dK] where K < N.

  Returns:
    A scalar `Tensor` that returns the weighted loss.

  Raises:
    ValueError: If the weight is None or the shape is not compatible with the
      losses shape or if the number of dimensions (rank) of either losses or
      weight is missing.
  """
  if weight is None:
    raise ValueError("`weight` cannot be None")
  input_dtype = losses.dtype
  losses = math_ops.to_float(losses)
  weight = math_ops.to_float(ops.convert_to_tensor(weight))

  if losses.get_shape().ndims is None:
    raise ValueError("losses.get_shape().ndims cannot be None")
  if weight.get_shape().ndims is None:
    raise ValueError("weight.get_shape().ndims cannot be None")

  total_loss = _scale_losses(losses, weight)
  num_present = _num_present(losses, weight)
  mean_loss = _safe_mean(total_loss, num_present)
  # convert the result back to the input type
  mean_loss = math_ops.cast(mean_loss, input_dtype)
  add_loss(mean_loss)
  return mean_loss
def absolute_difference(predictions, targets, weight=1.0, scope=None):
  """Adds an Absolute Difference loss to the training procedure.

  `weight` acts as a coefficient for the loss. If a scalar is provided, then
  the loss is simply scaled by the given value. If `weight` is a tensor of
  size [batch_size], then the total loss for each sample of the batch is
  rescaled by the corresponding element in the `weight` vector. If the shape
  of `weight` matches the shape of `predictions`, then the loss of each
  measurable element of `predictions` is scaled by the corresponding value of
  `weight`.

  Args:
    predictions: The predicted outputs.
    targets: The ground truth output tensor, same dimensions as 'predictions'.
    weight: Coefficients for the loss: a scalar, a tensor of shape
      [batch_size], or a tensor whose shape matches `predictions`.
    scope: The scope for the operations performed in computing the loss.

  Returns:
    A scalar `Tensor` representing the loss value.

  Raises:
    ValueError: If the shape of `predictions` doesn't match that of `targets`
      or if the shape of `weight` is invalid.
  """
  with ops.name_scope(scope, "absolute_difference",
                      [predictions, targets]) as scope:
    predictions.get_shape().assert_is_compatible_with(targets.get_shape())
    if weight is None:
      raise ValueError("`weight` cannot be None")
    predictions = math_ops.to_float(predictions)
    targets = math_ops.to_float(targets)
    losses = math_ops.abs(math_ops.sub(predictions, targets))
    return compute_weighted_loss(losses, weight)
def log_loss(predictions, targets, weight=1.0, epsilon=1e-7, scope=None):
  """Adds a Log Loss term to the training procedure.

  `weight` acts as a coefficient for the loss. If a scalar is provided, then
  the loss is simply scaled by the given value. If `weight` is a tensor of
  size [batch_size], then the total loss for each sample of the batch is
  rescaled by the corresponding element in the `weight` vector. If the shape
  of `weight` matches the shape of `predictions`, then the loss of each
  measurable element of `predictions` is scaled by the corresponding value of
  `weight`.

  Args:
    predictions: The predicted outputs.
    targets: The ground truth output tensor, same dimensions as 'predictions'.
    weight: Coefficients for the loss: a scalar, a tensor of shape
      [batch_size], or a tensor whose shape matches `predictions`.
    epsilon: A small increment to add to avoid taking a log of zero.
    scope: The scope for the operations performed in computing the loss.

  Returns:
    A scalar `Tensor` representing the loss value.

  Raises:
    ValueError: If the shape of `predictions` doesn't match that of `targets`
      or if the shape of `weight` is invalid.
  """
  with ops.name_scope(scope, "log_loss", [predictions, targets]) as scope:
    predictions.get_shape().assert_is_compatible_with(targets.get_shape())
    if weight is None:
      raise ValueError("`weight` cannot be None")
    predictions = math_ops.to_float(predictions)
    targets = math_ops.to_float(targets)
    losses = -math_ops.mul(
        targets, math_ops.log(predictions + epsilon)) - math_ops.mul(
            (1 - targets), math_ops.log(1 - predictions + epsilon))
    return compute_weighted_loss(losses, weight)
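A worked example of the formula -t*log(p) - (1-t)*log(1-p) with hypothetical values: for target 1.0 and prediction 0.8 the loss is -log(0.8) ≈ 0.223, and for target 0.0 it is -log(0.2) ≈ 1.609, giving a mean of about 0.916 under the default weight (assumes the listing's loss module is in scope):

preds = tf.constant([0.8, 0.8])
targets = tf.constant([1.0, 0.0])
loss = log_loss(preds, targets)
with tf.Session() as sess:
  print(sess.run(loss))  # (0.223 + 1.609) / 2 ~= 0.916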
def sum_of_squares(predictions, targets, weight=1.0, scope=None):
  """Adds a Sum-of-Squares loss to the training procedure.

  `weight` acts as a coefficient for the loss. If a scalar is provided, then
  the loss is simply scaled by the given value. If `weight` is a tensor of
  size [batch_size], then the total loss for each sample of the batch is
  rescaled by the corresponding element in the `weight` vector. If the shape
  of `weight` matches the shape of `predictions`, then the loss of each
  measurable element of `predictions` is scaled by the corresponding value of
  `weight`.

  Args:
    predictions: The predicted outputs.
    targets: The ground truth output tensor, same dimensions as 'predictions'.
    weight: Coefficients for the loss: a scalar, a tensor of shape
      [batch_size], or a tensor whose shape matches `predictions`.
    scope: The scope for the operations performed in computing the loss.

  Returns:
    A scalar `Tensor` representing the loss value.

  Raises:
    ValueError: If the shape of `predictions` doesn't match that of `targets`
      or if the shape of `weight` is invalid.
  """
  with ops.name_scope(scope, "sum_of_squares_loss",
                      [predictions, targets]) as scope:
    predictions.get_shape().assert_is_compatible_with(targets.get_shape())
    if weight is None:
      raise ValueError("`weight` cannot be None")
    predictions = math_ops.to_float(predictions)
    targets = math_ops.to_float(targets)
    losses = math_ops.square(math_ops.sub(predictions, targets))
    return compute_weighted_loss(losses, weight)
def cosine_distance(predictions, targets, dim, weight=1.0, scope=None):
  """Adds a cosine-distance loss to the training procedure.

  Note that the function assumes that the predictions and targets are already
  unit-normalized.

  Args:
    predictions: An arbitrary matrix.
    targets: A `Tensor` whose shape matches 'predictions'.
    dim: The dimension along which the cosine distance is computed.
    weight: Coefficients for the loss: a scalar, a tensor of shape
      [batch_size], or a tensor whose shape matches `predictions`.
    scope: The scope for the operations performed in computing the loss.

  Returns:
    A scalar `Tensor` representing the loss value.

  Raises:
    ValueError: If the shape of `predictions` doesn't match that of `targets`
      or if the shape of `weight` is invalid.
  """
  with ops.name_scope(scope, "cosine_distance_loss",
                      [predictions, targets]) as scope:
    predictions.get_shape().assert_is_compatible_with(targets.get_shape())
    if weight is None:
      raise ValueError("`weight` cannot be None")
    predictions = math_ops.to_float(predictions)
    targets = math_ops.to_float(targets)
    radial_diffs = math_ops.mul(predictions, targets)
    losses = 1 - math_ops.reduce_sum(radial_diffs, reduction_indices=[dim,])
    return compute_weighted_loss(losses, weight)
def _get_weight_tensor(features, weight_column_name):
  """Returns the weight tensor of shape [batch_size] or 1."""
  if weight_column_name is None:
    return 1.0
  else:
    return array_ops.reshape(
        math_ops.to_float(features[weight_column_name]), shape=(-1,))
def _make_streaming_with_threshold(streaming_metrics_fn, threshold):

  def _streaming_metrics(predictions, targets):
    return streaming_metrics_fn(
        predictions=math_ops.to_float(
            math_ops.greater_equal(predictions, threshold)),
        labels=targets)

  return _streaming_metrics
def _wrap_metric(metric):
  """Wraps metrics for mismatched prediction/target types."""
  def wrapped(preds, targets):
    targets = math_ops.cast(targets, preds.dtype)
    return metric(preds, targets)

  def wrapped_weights(preds, targets, weights=None):
    targets = math_ops.cast(targets, preds.dtype)
    if weights is not None:
      weights = array_ops.reshape(math_ops.to_float(weights), shape=(-1,))
    return metric(preds, targets, weights)

  return wrapped_weights if "weights" in _get_metric_args(metric) else wrapped
def _weighted_loss(loss, weight_tensor):
  unweighted_loss = array_ops.reshape(loss, shape=(-1,))
  weighted_loss = math_ops.mul(unweighted_loss,
                               array_ops.reshape(weight_tensor, shape=(-1,)))
  return math_ops.div(
      math_ops.reduce_sum(weighted_loss),
      math_ops.to_float(math_ops.reduce_sum(weight_tensor)),
      name="loss")
def get_weight_tensor(self, features):
  if not self._weight_column_name:
    return None
  else:
    return array_ops.reshape(
        math_ops.to_float(features[self._weight_column_name]), shape=(-1,))
def loss(self, logits, target, features):
  """Returns loss tensor for this head.

  The loss returned is the weighted average.

    L = sum_{i} w_{i} * l_{i} / sum_{i} w_{i}

  Args:
    logits: logits, a float tensor.
    target: either a tensor for labels or in multihead case, a dict of string
      to target tensor.
    features: features dict.

  Returns:
    Loss tensor.
  """
  target = target[self.name] if isinstance(target, dict) else target
  loss_unweighted = self._loss_fn(logits, target)

  weight_tensor = self.get_weight_tensor(features)
  if weight_tensor is None:
    return math_ops.reduce_mean(loss_unweighted, name="loss")
  loss_weighted = self._weighted_loss(loss_unweighted, weight_tensor)
  return math_ops.div(
      math_ops.reduce_sum(loss_weighted),
      math_ops.to_float(math_ops.reduce_sum(weight_tensor)),
      name="loss")
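The docstring's formula L = sum_{i} w_{i} * l_{i} / sum_{i} w_{i} is easy to verify numerically without constructing a head object; a standalone sketch with hypothetical values:

# losses [1.0, 3.0] with weights [1.0, 3.0]:
# (1*1 + 3*3) / (1 + 3) = 10 / 4 = 2.5, versus the unweighted mean 2.0.
losses = tf.constant([1.0, 3.0])
weights = tf.constant([1.0, 3.0])
weighted_avg = tf.reduce_sum(losses * weights) / tf.reduce_sum(weights)
with tf.Session() as sess:
  print(sess.run(weighted_avg))  # 2.5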
def _mean_squared_loss(logits, target):
  # To prevent broadcasting inside "-".
  if len(target.get_shape()) == 1:
    target = array_ops.expand_dims(target, dim=[1])
  logits.get_shape().assert_is_compatible_with(target.get_shape())
  return math_ops.square(logits - math_ops.to_float(target))
def _float_weights_or_none(weights):
  if weights is None:
    return None
  return math_ops.to_float(weights)
def _accuracy_at_threshold(threshold):

  def _accuracy_metric(predictions, targets, weights=None):
    threshold_predictions = math_ops.to_float(
        math_ops.greater_equal(predictions, threshold))
    return metrics_lib.streaming_accuracy(predictions=threshold_predictions,
                                          labels=targets,
                                          weights=weights)

  return _accuracy_metric
def _count_condition(values, weights=None, metrics_collections=None,
                     updates_collections=None):
  """Sums the weights of cases where the given values are True.

  If `weights` is `None`, weights default to 1. Use weights of 0 to mask
  values.

  Args:
    values: A `bool` `Tensor` of arbitrary size.
    weights: An optional `Tensor` whose shape is broadcastable to `values`.
    metrics_collections: An optional list of collections that the metric
      value variable should be added to.
    updates_collections: An optional list of collections that the metric
      update ops should be added to.

  Returns:
    value_tensor: A tensor representing the current value of the metric.
    update_op: An operation that accumulates the error from a batch of data.

  Raises:
    ValueError: If `weights` is not `None` and its shape doesn't match
      `values`, or if either `metrics_collections` or `updates_collections`
      are not a list or tuple.
  """
  check_ops.assert_type(values, dtypes.bool)
  count = _create_local('count', shape=[])

  values = math_ops.to_float(values)
  if weights is not None:
    weights = math_ops.to_float(weights)
    values = math_ops.mul(values, weights)

  value_tensor = array_ops.identity(count)
  update_op = state_ops.assign_add(count, math_ops.reduce_sum(values))

  if metrics_collections:
    ops.add_to_collections(metrics_collections, value_tensor)

  if updates_collections:
    ops.add_to_collections(updates_collections, update_op)

  return value_tensor, update_op
def compute_weighted_loss(
    losses, weights=_WEIGHT_SENTINEL, weight=_WEIGHT_SENTINEL):
  """Computes the weighted loss.

  Args:
    losses: A tensor of size [batch_size, d1, ... dN].
    weights: A tensor of size [1] or [batch_size, d1, ... dK] where K < N.
    weight: Deprecated alias for `weights`.

  Returns:
    A scalar `Tensor` that returns the weighted loss.

  Raises:
    ValueError: If `weights` is `None` or the shape is not compatible with
      `losses`, or if the number of dimensions (rank) of either `losses` or
      `weights` is missing.
  """
  weights = _weights(weights, weight)
  losses = ops.convert_to_tensor(losses)
  input_dtype = losses.dtype
  losses = math_ops.to_float(losses)
  weights = math_ops.to_float(ops.convert_to_tensor(weights))

  if losses.get_shape().ndims is None:
    raise ValueError("losses.get_shape().ndims cannot be None")
  weights_shape = weights.get_shape()
  if weights_shape.ndims is None:
    raise ValueError("weight.get_shape().ndims cannot be None")

  if weights_shape.ndims > 1 and weights_shape.dims[-1].is_compatible_with(1):
    weights = array_ops.squeeze(weights, [-1])

  total_loss = _scale_losses(losses, weights)
  num_present = _num_present(losses, weights)
  mean_loss = _safe_mean(total_loss, num_present)
  # convert the result back to the input type
  mean_loss = math_ops.cast(mean_loss, input_dtype)
  add_loss(mean_loss)
  return mean_loss
def mean_squared_error(predictions, labels=None, weights=_WEIGHT_SENTINEL,
                       scope=None, targets=None, weight=_WEIGHT_SENTINEL):
  """Adds a Sum-of-Squares loss to the training procedure.

  `weights` acts as a coefficient for the loss. If a scalar is provided, then
  the loss is simply scaled by the given value. If `weights` is a tensor of
  size [batch_size], then the total loss for each sample of the batch is
  rescaled by the corresponding element in the `weights` vector. If the shape
  of `weights` matches the shape of `predictions`, then the loss of each
  measurable element of `predictions` is scaled by the corresponding value of
  `weights`.

  Args:
    predictions: The predicted outputs.
    labels: The ground truth output tensor, same dimensions as 'predictions'.
    weights: Coefficients for the loss: a scalar, a tensor of shape
      [batch_size], or a tensor whose shape matches `predictions`.
    scope: The scope for the operations performed in computing the loss.
    targets: Deprecated alias for `labels`.
    weight: Deprecated alias for `weights`.

  Returns:
    A scalar `Tensor` representing the loss value.

  Raises:
    ValueError: If the shape of `predictions` doesn't match that of `labels`
      or if the shape of `weights` is invalid.
  """
  labels = _labels(labels, targets)
  weights = _weights(weights, weight)
  with ops.name_scope(scope, "mean_squared_error",
                      [predictions, labels, weights]) as scope:
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    predictions = math_ops.to_float(predictions)
    labels = math_ops.to_float(labels)
    losses = math_ops.square(math_ops.sub(predictions, labels))
    return compute_weighted_loss(losses, weights)
def cosine_distance(predictions, labels=None, dim=None,
                    weights=_WEIGHT_SENTINEL, scope=None, targets=None,
                    weight=_WEIGHT_SENTINEL):
  """Adds a cosine-distance loss to the training procedure.

  Note that the function assumes that `predictions` and `labels` are already
  unit-normalized.

  Args:
    predictions: An arbitrary matrix.
    labels: A `Tensor` whose shape matches 'predictions'.
    dim: The dimension along which the cosine distance is computed.
    weights: Coefficients for the loss: a scalar, a tensor of shape
      [batch_size], or a tensor whose shape matches `predictions`.
    scope: The scope for the operations performed in computing the loss.
    targets: Deprecated alias for `labels`.
    weight: Deprecated alias for `weights`.

  Returns:
    A scalar `Tensor` representing the loss value.

  Raises:
    ValueError: If `predictions` shape doesn't match `labels` shape, or
      `weights` is `None`.
  """
  labels = _labels(labels, targets)
  weights = _weights(weights, weight)
  if dim is None:
    raise ValueError("`dim` cannot be None.")
  with ops.name_scope(scope, "cosine_distance_loss",
                      [predictions, labels, weights]) as scope:
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    predictions = math_ops.to_float(predictions)
    labels = math_ops.to_float(labels)
    radial_diffs = math_ops.mul(predictions, labels)
    losses = 1 - math_ops.reduce_sum(radial_diffs, reduction_indices=[dim,])
    return compute_weighted_loss(losses, weights)
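For unit-normalized inputs the loss is 1 - sum(p * t) per example. A hypothetical check (assumes the default weight of 1 and the listing's dependencies): identical unit vectors give distance 0, orthogonal ones give 1, so the batch mean below is 0.5:

preds = tf.constant([[1.0, 0.0], [1.0, 0.0]])
labels = tf.constant([[1.0, 0.0], [0.0, 1.0]])
loss = cosine_distance(preds, labels, dim=1)
with tf.Session() as sess:
  print(sess.run(loss))  # (0 + 1) / 2 = 0.5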
def _weight_tensor(features, weight_column_name):
  if not weight_column_name:
    return None
  else:
    return array_ops.reshape(
        math_ops.to_float(features[weight_column_name]), shape=(-1,))
def _mean_squared_loss(logits, labels):
  # To prevent broadcasting inside "-".
  if len(labels.get_shape()) == 1:
    labels = array_ops.expand_dims(labels, dim=[1])
  # TODO(zakaria): make sure it does not recreate the broadcast bug.
  if len(logits.get_shape()) == 1:
    logits = array_ops.expand_dims(logits, dim=[1])
  logits.get_shape().assert_is_compatible_with(labels.get_shape())
  return math_ops.square(logits - math_ops.to_float(labels))
def _log_loss_with_two_classes(logits, labels):
  # sigmoid_cross_entropy_with_logits requires [batch_size, 1] labels.
  if len(labels.get_shape()) == 1:
    labels = array_ops.expand_dims(labels, dim=[1])
  loss_vec = nn.sigmoid_cross_entropy_with_logits(logits,
                                                  math_ops.to_float(labels))
  return loss_vec
def _sigmoid_cross_entropy_loss(logits, labels):
  # sigmoid_cross_entropy_with_logits requires [batch_size, n_classes] labels.
  return nn.sigmoid_cross_entropy_with_logits(logits,
                                              math_ops.to_float(labels))
def _weighted_average_loss_metric_spec(loss_fn, prediction_key, label_key,
                                       weight_key):

  def _streaming_weighted_average_loss(predictions, labels, weights=None):
    loss_unweighted = loss_fn(predictions, labels)
    if weights is not None:
      weights = math_ops.to_float(weights)
    _, weighted_average_loss = _loss(loss_unweighted, weights,
                                     name="eval_loss")
    return metrics_lib.streaming_mean(weighted_average_loss)

  return metric_spec.MetricSpec(_streaming_weighted_average_loss,
                                prediction_key, label_key, weight_key)
def _make_streaming_with_threshold(streaming_metrics_fn, threshold):

  def _streaming_metrics(predictions, labels):
    return streaming_metrics_fn(
        predictions=math_ops.to_float(
            math_ops.greater_equal(predictions, threshold)),
        labels=labels)

  return _streaming_metrics
def _accuracy_at_threshold(threshold):

  def _accuracy_metric(predictions, labels, weights=None):
    threshold_predictions = math_ops.to_float(
        math_ops.greater_equal(predictions, threshold))
    return metrics_lib.streaming_accuracy(predictions=threshold_predictions,
                                          labels=labels,
                                          weights=weights)

  return _accuracy_metric
def insert_transformed_feature(self, columns_to_tensors):
  """Applies the transformation and inserts it into columns_to_tensors.

  Args:
    columns_to_tensors: A mapping from feature columns to tensors. A 'string'
      key means a base (not-transformed) feature. It can have a
      _FeatureColumn as a key too, meaning that _FeatureColumn is already
      transformed.
  """
  # Transform the input tensor according to the normalizer function.
  input_tensor = self._normalized_input_tensor(columns_to_tensors[self.name])
  columns_to_tensors[self] = math_ops.to_float(input_tensor)

# pylint: disable=unused-argument
def _to_dnn_input_layer(self,
                        input_tensor,
                        weight_collections=None,
                        trainable=True,
                        output_rank=2):
  if input_tensor.dtype != dtypes.float32:
    input_tensor = math_ops.to_float(input_tensor)
  return _reshape_real_valued_tensor(input_tensor, output_rank, self.name)
def average_size(self):
  """Constructs a TF graph for evaluating the average size of a forest.

  Returns:
    The average number of nodes over the trees.
  """
  sizes = []
  for i in range(self.params.num_trees):
    with ops.device(self.device_assigner.get_device(i)):
      sizes.append(self.trees[i].size())
  return math_ops.reduce_mean(math_ops.to_float(array_ops.pack(sizes)))

# pylint: disable=unused-argument
def _r2(probabilities, targets, weights=None):
  if targets.get_shape().ndims == 1:
    targets = array_ops.expand_dims(targets, -1)
  targets = math_ops.to_float(targets)
  y_mean = math_ops.reduce_mean(targets, 0)
  squares_total = math_ops.reduce_sum(math_ops.square(targets - y_mean), 0)
  squares_residuals = math_ops.reduce_sum(
      math_ops.square(targets - probabilities), 0)
  score = 1 - math_ops.reduce_sum(squares_residuals / squares_total)
  return metric_ops.streaming_mean(score, weights=weights)
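A worked R^2 check with hypothetical values: targets [1, 2, 3] have SS_tot = 2.0 about their mean 2; predictions [1.1, 1.9, 3.2] leave SS_res = 0.01 + 0.01 + 0.04 = 0.06, so the score is 1 - 0.06/2.0 = 0.97. Since _r2 returns a streaming metric pair, the local variables must be initialized and the update op run before reading the value:

score_op, update_op = _r2(tf.constant([[1.1], [1.9], [3.2]]),
                          tf.constant([1.0, 2.0, 3.0]))
with tf.Session() as sess:
  sess.run(tf.local_variables_initializer())
  sess.run(update_op)
  print(sess.run(score_op))  # ~0.97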