The following code examples, extracted from open-source Python projects, illustrate how to use tensorflow.python.ops.array_ops.boolean_mask().
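Before the extracted examples, a minimal sketch of what boolean_mask does (a hypothetical snippet assuming TensorFlow 1.x graph mode; tf.boolean_mask is the public alias of array_ops.boolean_mask, and the values are purely illustrative):

# Minimal sketch of boolean_mask semantics (assumes TensorFlow 1.x).
import tensorflow as tf

scores = tf.constant([0.1, 0.9, 0.4, 0.7])
keep = tf.constant([True, False, True, True])

# Keeps only the entries of `scores` where `keep` is True.
kept = tf.boolean_mask(scores, keep)

with tf.Session() as sess:
  print(sess.run(kept))  # [0.1 0.4 0.7]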
def _apply_transform(self, input_tensors, **kwargs):
  """Applies the transformation to the `transform_input`.

  Args:
    input_tensors: a list of Tensors representing the input to the Transform.
    **kwargs: Additional keyword arguments, unused here.

  Returns:
    A namedtuple of Tensors representing the transformed output.
  """
  input_tensor = input_tensors[0]
  mask = input_tensors[1]
  if mask.get_shape().ndims > 1:
    mask = array_ops.squeeze(mask)

  if isinstance(input_tensor, ops.SparseTensor):
    mask_fn = sparse_boolean_mask
  else:
    mask_fn = array_ops.boolean_mask

  # pylint: disable=not-callable
  return self.return_type(mask_fn(input_tensor, mask))
def _make_auc_histograms(boolean_labels, scores, score_range, nbins):
  """Create histogram tensors from one batch of labels/scores."""
  with variable_scope.variable_scope(
      None, 'make_auc_histograms', [boolean_labels, scores, nbins]):
    # Histogram of scores for records in this batch with True label.
    hist_true = histogram_ops.histogram_fixed_width(
        array_ops.boolean_mask(scores, boolean_labels),
        score_range,
        nbins=nbins,
        dtype=dtypes.int64,
        name='hist_true')
    # Histogram of scores for records in this batch with False label.
    hist_false = histogram_ops.histogram_fixed_width(
        array_ops.boolean_mask(scores, math_ops.logical_not(boolean_labels)),
        score_range,
        nbins=nbins,
        dtype=dtypes.int64,
        name='hist_false')
    return hist_true, hist_false
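For context, a short hedged sketch of the splitting step that the helper above performs before histogramming: a boolean label vector and its negation select the positive and negative scores. The names and values below are illustrative, not taken from the project above.

# Illustrative only: split a batch of scores into positive and negative
# groups with boolean_mask (assumes TensorFlow 1.x).
import tensorflow as tf

scores = tf.constant([0.2, 0.8, 0.5, 0.9])
labels = tf.constant([False, True, False, True])

pos_scores = tf.boolean_mask(scores, labels)                  # [0.8, 0.9]
neg_scores = tf.boolean_mask(scores, tf.logical_not(labels))  # [0.2, 0.5]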
def _apply_transform(self, input_tensors, **kwargs):
  """Applies the transformation to the `transform_input`.

  Args:
    input_tensors: a list of Tensors representing the input to the Transform.
    **kwargs: Additional keyword arguments, unused here.

  Returns:
    A namedtuple of Tensors representing the transformed output.
  """
  input_tensor = input_tensors[0]
  mask = input_tensors[1]
  if mask.get_shape().ndims > 1:
    mask = array_ops.squeeze(mask)

  if isinstance(input_tensor, sparse_tensor_py.SparseTensor):
    mask_fn = sparse_boolean_mask
  else:
    mask_fn = array_ops.boolean_mask

  # pylint: disable=not-callable
  return self.return_type(mask_fn(input_tensor, mask))
def call(self, inputs):
  # Zero out any timestep whose features all equal `mask_value`.
  boolean_mask = K.any(
      K.not_equal(inputs, self.mask_value), axis=-1, keepdims=True)
  return inputs * K.cast(boolean_mask, K.floatx())
def _mask_activations_and_targets(activations, targets, sequence_lengths):
  """Remove entries outside `sequence_lengths` and return flattened results.

  Args:
    activations: output of the RNN, shape `[batch_size, padded_length, k]`.
    targets: target values, shape `[batch_size, padded_length]`.
    sequence_lengths: a `Tensor` of shape `[batch_size]` with the unpadded
      length of each sequence. If `None`, then each sequence is unpadded.

  Returns:
    activations_masked: `logit` values with those beyond `sequence_lengths`
      removed for each batch. Batches are then concatenated. Shape
      `[tf.sum(sequence_lengths), k]` if `sequence_lengths` is not `None` and
      shape `[batch_size * padded_length, k]` otherwise.
    targets_masked: target values after removing unneeded entries. Shape
      `[tf.sum(sequence_lengths)]` if `sequence_lengths` is not `None` and
      shape `[batch_size * padded_length]` otherwise.
  """
  with ops.name_scope('mask_activations_and_targets',
                      values=[activations, targets, sequence_lengths]):
    targets_shape = array_ops.shape(targets)
    batch_size = targets_shape[0]
    padded_length = targets_shape[1]
    if sequence_lengths is None:
      flattened_dimension = padded_length * batch_size
      activations_masked = array_ops.reshape(activations,
                                             [flattened_dimension, -1])
      targets_masked = array_ops.reshape(targets, [flattened_dimension])
    else:
      mask = _padding_mask(sequence_lengths, padded_length)
      activations_masked = array_ops.boolean_mask(activations, mask)
      targets_masked = array_ops.boolean_mask(targets, mask)
    return activations_masked, targets_masked
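A short sketch of the masking step itself, using the public TF 1.x API instead of the internal modules above (shapes and lengths are illustrative): a [batch_size, padded_length] boolean mask marks the valid time steps, and boolean_mask both selects and flattens them.

# Illustrative sketch: drop padded time steps and flatten (assumes TF 1.x).
import tensorflow as tf

batch_size, padded_length, k = 2, 3, 4
activations = tf.random_normal([batch_size, padded_length, k])
targets = tf.zeros([batch_size, padded_length], dtype=tf.int32)
sequence_lengths = tf.constant([3, 1])

# mask[i, t] is True iff t < sequence_lengths[i].
mask = tf.sequence_mask(sequence_lengths, padded_length)

# Results have shapes [sum(sequence_lengths), k] and [sum(sequence_lengths)].
activations_masked = tf.boolean_mask(activations, mask)
targets_masked = tf.boolean_mask(targets, mask)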
def _apply_transform(self, input_tensors, **kwargs):
  """Applies the transformation to the `transform_input`.

  Args:
    input_tensors: a list of Tensors representing the input to the Transform.
    **kwargs: Additional keyword arguments, unused here.

  Returns:
    A namedtuple of Tensors representing the transformed output.
  """
  d = input_tensors[0]
  if self.strip_value is np.nan:
    strip_hot = math_ops.is_nan(d)
  else:
    strip_hot = math_ops.equal(d,
                               array_ops.constant([self.strip_value],
                                                  dtype=d.dtype))
  keep_hot = math_ops.logical_not(strip_hot)
  length = array_ops.reshape(array_ops.shape(d), [])
  indices = array_ops.boolean_mask(math_ops.range(length), keep_hot)
  values = array_ops.boolean_mask(d, keep_hot)
  sparse_indices = array_ops.reshape(
      math_ops.cast(indices, dtypes.int64), [-1, 1])
  shape = math_ops.cast(array_ops.shape(d), dtypes.int64)
  # pylint: disable=not-callable
  return self.return_type(ops.SparseTensor(sparse_indices, values, shape))
def sparse_boolean_mask(sparse_tensor, mask, name="sparse_boolean_mask"):
  """Boolean mask for `SparseTensor`s.

  Args:
    sparse_tensor: a `SparseTensor`.
    mask: a 1D boolean dense `Tensor` whose length is equal to the 0th
      dimension of `sparse_tensor`.
    name: optional name for this operation.

  Returns:
    A `SparseTensor` that contains row `k` of `sparse_tensor` iff `mask[k]` is
    `True`.
  """
  # TODO(jamieas): consider mask dimension > 1 for symmetry with `boolean_mask`.
  with ops.name_scope(name, values=[sparse_tensor, mask]):
    mask = ops.convert_to_tensor(mask)
    mask_rows = array_ops.where(mask)
    first_indices = array_ops.squeeze(array_ops.slice(sparse_tensor.indices,
                                                      [0, 0], [-1, 1]))

    # Identify indices corresponding to the rows identified by mask_rows.
    sparse_entry_matches = functional_ops.map_fn(
        lambda x: math_ops.equal(first_indices, x),
        mask_rows,
        dtype=dtypes.bool)

    # Combine the rows of index_matches to form a mask for the sparse indices
    # and values.
    to_retain = array_ops.reshape(
        functional_ops.foldl(math_ops.logical_or, sparse_entry_matches), [-1])

    return sparse_ops.sparse_retain(sparse_tensor, to_retain)
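A possible usage sketch for the helper above, assuming it is in scope alongside TensorFlow 1.x; the sparse tensor and mask values here are made up for illustration.

# Illustrative usage of sparse_boolean_mask defined above (assumes TF 1.x).
import tensorflow as tf

st = tf.SparseTensor(indices=[[0, 0], [1, 1], [2, 0]],
                     values=[1.0, 2.0, 3.0],
                     dense_shape=[3, 2])
row_mask = tf.constant([True, False, True])

# Entries in rows 0 and 2 are kept; row 1 is emptied. Note that
# sparse_retain preserves the original dense shape.
masked_st = sparse_boolean_mask(st, row_mask)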
def _apply_transform(self, input_tensors, **kwargs):
  """Applies the transformation to the `transform_input`.

  Args:
    input_tensors: a list of Tensors representing the input to the Transform.
    **kwargs: Additional keyword arguments, unused here.

  Returns:
    A namedtuple of Tensors representing the transformed output.
  """
  d = input_tensors[0]
  if self.strip_value is np.nan:
    strip_hot = math_ops.is_nan(d)
  else:
    strip_hot = math_ops.equal(d,
                               array_ops.constant([self.strip_value],
                                                  dtype=d.dtype))
  keep_hot = math_ops.logical_not(strip_hot)
  length = array_ops.reshape(array_ops.shape(d), [])
  indices = array_ops.boolean_mask(math_ops.range(length), keep_hot)
  values = array_ops.boolean_mask(d, keep_hot)
  sparse_indices = array_ops.reshape(
      math_ops.cast(indices, dtypes.int64), [-1, 1])
  shape = math_ops.cast(array_ops.shape(d), dtypes.int64)
  # pylint: disable=not-callable
  return self.return_type(
      sparse_tensor.SparseTensor(sparse_indices, values, shape))
def boolean_mask(labeled_tensor, mask, name=None):
  """Apply a boolean mask to a labeled tensor.

  Unlike `tf.boolean_mask`, this currently only works on 1-dimensional masks.
  The mask is applied to the first axis of `labeled_tensor`. Labels on the
  first axis are removed, because True indices in `mask` may not be known
  dynamically.

  Args:
    labeled_tensor: The input tensor.
    mask: A 1D boolean `LabeledTensor` whose axis matches the first axis of
      `labeled_tensor`.
    name: Optional op name.

  Returns:
    The masked labeled tensor.

  Raises:
    ValueError: if the first axis of the mask does not equal the first axis
      of the labeled tensor.
  """
  with ops.name_scope(name, 'lt_boolean_mask',
                      [labeled_tensor, mask]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
    mask = core.convert_to_labeled_tensor(mask)

    if len(mask.axes) > 1:
      raise NotImplementedError(
          "LabeledTensor's boolean_mask currently only supports 1D masks")
    mask_axis = list(mask.axes.values())[0]
    lt_axis = list(labeled_tensor.axes.values())[0]
    if mask_axis != lt_axis:
      raise ValueError('the first axis of the labeled tensor and the mask '
                       'are not equal:\n%r\n%r' % (lt_axis, mask_axis))
    op = array_ops.boolean_mask(labeled_tensor.tensor, mask.tensor, name=scope)
    # TODO(shoyer): attempt to infer labels for the masked values, by calling
    # tf.contrib.util.constant_value on the mask?
    axes = [lt_axis.name] + list(labeled_tensor.axes.values())[1:]
    return core.LabeledTensor(op, axes)
def _get_examples(file_name_queue, reader, num_threads, read_batch_size,
                  filter_fn, parse_fn):
  with ops.name_scope('read'):
    example_list = []
    for _ in range(num_threads):
      if read_batch_size > 1:
        keys, examples_proto = reader().read_up_to(file_name_queue,
                                                   read_batch_size)
      else:
        keys, examples_proto = reader().read(file_name_queue)
      if filter_fn:
        mask = filter_fn(keys, examples_proto)
        keys = array_ops.boolean_mask(keys, mask)
        examples_proto = array_ops.boolean_mask(examples_proto, mask)
      if parse_fn:
        parsed_examples = parse_fn(examples_proto)
        # Map keys into example map because batch_join doesn't support
        # tuple of Tensor + dict.
        if isinstance(parsed_examples, dict):
          parsed_examples[KEY_FEATURE_NAME] = keys
          example_list.append(parsed_examples)
        else:
          example_list.append((keys, parsed_examples))
      else:
        example_list.append((keys, examples_proto))
    return example_list
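A hedged sketch of what a filter_fn passed to the reader above could look like: it receives the keys and serialized example protos and returns a boolean mask of the same length. The hash-bucket sampling below is a made-up illustration, not the project's actual filter.

# Illustrative filter_fn: keep roughly 1 in 10 records by hashing their keys
# (assumes TF 1.x; the (keys, examples) -> boolean mask contract is the point).
import tensorflow as tf

def sample_filter_fn(keys, examples_proto):
  del examples_proto  # Unused; this example filters on keys only.
  buckets = tf.string_to_hash_bucket_fast(keys, 10)
  return tf.equal(buckets, 0)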
def mask_activations_and_labels(activations, labels, sequence_lengths):
  """Remove entries outside `sequence_lengths` and return flattened results.

  Args:
    activations: Output of the RNN, shape `[batch_size, padded_length, k]`.
    labels: Label values, shape `[batch_size, padded_length]`.
    sequence_lengths: A `Tensor` of shape `[batch_size]` with the unpadded
      length of each sequence. If `None`, then each sequence is unpadded.

  Returns:
    activations_masked: `logit` values with those beyond `sequence_lengths`
      removed for each batch. Batches are then concatenated. Shape
      `[tf.sum(sequence_lengths), k]` if `sequence_lengths` is not `None` and
      shape `[batch_size * padded_length, k]` otherwise.
    labels_masked: Label values after removing unneeded entries. Shape
      `[tf.sum(sequence_lengths)]` if `sequence_lengths` is not `None` and
      shape `[batch_size * padded_length]` otherwise.
  """
  with ops.name_scope('mask_activations_and_labels',
                      values=[activations, labels, sequence_lengths]):
    labels_shape = array_ops.shape(labels)
    batch_size = labels_shape[0]
    padded_length = labels_shape[1]
    if sequence_lengths is None:
      flattened_dimension = padded_length * batch_size
      activations_masked = array_ops.reshape(activations,
                                             [flattened_dimension, -1])
      labels_masked = array_ops.reshape(labels, [flattened_dimension])
    else:
      mask = array_ops.sequence_mask(sequence_lengths, padded_length)
      activations_masked = array_ops.boolean_mask(activations, mask)
      labels_masked = array_ops.boolean_mask(labels, mask)
    return activations_masked, labels_masked
def test_name(self):
  mask = core.LabeledTensor(math_ops.range(7) > 3, [self.a0])
  masked_lt = ops.boolean_mask(self.original_lt, mask)
  self.assertIn('lt_boolean_mask', masked_lt.name)
def test(self):
  mask = core.LabeledTensor(math_ops.range(7) > 3, [self.a0])
  masked_lt = ops.boolean_mask(self.original_lt, mask)
  golden_lt = core.LabeledTensor(
      array_ops.boolean_mask(self.original_lt.tensor, mask.tensor),
      ['x', self.a1, self.a2, self.a3])
  self.assertLabeledTensorsEqual(masked_lt, golden_lt)
def test_invalid_rank(self):
  mask = core.LabeledTensor(array_ops.ones((7, 3)) > 3, [self.a0, self.a1])
  with self.assertRaises(NotImplementedError):
    ops.boolean_mask(self.original_lt, mask)
def ctc_label_dense_to_sparse(labels, label_lengths):
  """Converts CTC labels from dense to sparse.

  Arguments:
      labels: dense CTC labels.
      label_lengths: length of the labels.

  Returns:
      A sparse tensor representation of the labels.
  """
  label_shape = array_ops.shape(labels)
  num_batches_tns = array_ops.stack([label_shape[0]])
  max_num_labels_tns = array_ops.stack([label_shape[1]])

  def range_less_than(_, current_input):
    return array_ops.expand_dims(
        math_ops.range(label_shape[1]), 0) < array_ops.fill(
            max_num_labels_tns, current_input)

  init = math_ops.cast(
      array_ops.fill([1, label_shape[1]], 0), dtypes_module.bool)
  dense_mask = functional_ops.scan(
      range_less_than, label_lengths, initializer=init, parallel_iterations=1)
  dense_mask = dense_mask[:, 0, :]

  label_array = array_ops.reshape(
      array_ops.tile(math_ops.range(0, label_shape[1]), num_batches_tns),
      label_shape)
  label_ind = array_ops.boolean_mask(label_array, dense_mask)

  batch_array = array_ops.transpose(
      array_ops.reshape(
          array_ops.tile(math_ops.range(0, label_shape[0]),
                         max_num_labels_tns), reverse(label_shape, 0)))
  batch_ind = array_ops.boolean_mask(batch_array, dense_mask)
  indices = array_ops.transpose(
      array_ops.reshape(concatenate([batch_ind, label_ind], axis=0), [2, -1]))

  vals_sparse = array_ops.gather_nd(labels, indices)

  return sparse_tensor.SparseTensor(
      math_ops.to_int64(indices), vals_sparse, math_ops.to_int64(label_shape))
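A small sketch of calling this conversion, assuming the function above and its Keras-backend helpers (reverse, concatenate) are in scope under TensorFlow 1.x; the label values below are made up for illustration.

# Illustrative call: convert padded dense CTC labels to a SparseTensor.
import tensorflow as tf

labels = tf.constant([[1, 2, 0],    # length 2, padded with 0
                      [3, 4, 5]])   # length 3
label_lengths = tf.constant([2, 3])

sparse_labels = ctc_label_dense_to_sparse(labels, label_lengths)
# Expected (under the assumptions above):
# sparse_labels.indices -> [[0, 0], [0, 1], [1, 0], [1, 1], [1, 2]]
# sparse_labels.values  -> [1, 2, 3, 4, 5]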