The following 33 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.python.ops.array_ops.fill().
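Before the project examples, a minimal sketch of the op itself may help. `tf.fill` is the public alias of `array_ops.fill`; the shapes and values below are purely illustrative:

import tensorflow as tf
from tensorflow.python.ops import array_ops

# fill(dims, value) builds a tensor of shape `dims` in which every
# element equals the scalar `value` -- the analogue of np.full.
t = array_ops.fill([2, 3], 9.0)  # [[9. 9. 9.], [9. 9. 9.]]

# Unlike tf.constant, the shape may itself be a runtime tensor,
# e.g. the shape of another tensor.
x = tf.random.uniform([5, 4])
like_x = array_ops.fill(tf.shape(x), 0.5)  # shape [5, 4], all 0.5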
def _variance(self):
  var = (self._ones() *
         math_ops.square(self.sigma) * self.df / (self.df - 2))
  # When 1 < df <= 2, variance is infinite.
  inf = np.array(np.inf, dtype=self.dtype.as_numpy_dtype())
  result_where_defined = math_ops.select(
      math_ops.greater(self.df, array_ops.fill(self.batch_shape(), 2.)),
      var,
      array_ops.fill(self.batch_shape(), inf, name="inf"))

  if self.allow_nan_stats:
    nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
    return math_ops.select(
        math_ops.greater(self.df, self._ones()),
        result_where_defined,
        array_ops.fill(self.batch_shape(), nan, name="nan"))
  else:
    return control_flow_ops.with_dependencies([
        check_ops.assert_less(
            array_ops.ones((), dtype=self.dtype), self.df,
            message="variance not defined for components of df <= 1"),
    ], result_where_defined)
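Most of the distribution examples on this page share one idiom: compute the statistic everywhere, then use an elementwise select to substitute `fill`-created `inf`/`nan` tensors where the statistic is undefined (`math_ops.select` is the older name of what later became `array_ops.where`). A standalone sketch of that idiom using the public API, with illustrative values:

import numpy as np
import tensorflow as tf

df = tf.constant([0.5, 1.5, 3.0])  # example degrees of freedom
var = df / (df - 2.)               # Student-t variance, valid only for df > 2
batch_shape = tf.shape(df)

inf = np.array(np.inf, dtype=np.float32)
nan = np.array(np.nan, dtype=np.float32)

# inf where 1 < df <= 2, nan where df <= 1 -- the same select/fill idiom.
result = tf.where(df > 2., var, tf.fill(batch_shape, inf))
result = tf.where(df > 1., result, tf.fill(batch_shape, nan))
# result == [nan, inf, 3.0]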
def _mode(self):
  mode = (self.a - 1.) / (self.a_b_sum - 2.)
  if self.allow_nan_stats:
    nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
    return math_ops.select(
        math_ops.logical_and(
            math_ops.greater(self.a, 1.), math_ops.greater(self.b, 1.)),
        mode,
        array_ops.fill(self.batch_shape(), nan, name="nan"))
  else:
    return control_flow_ops.with_dependencies([
        check_ops.assert_less(
            array_ops.ones((), dtype=self.dtype), self.a,
            message="Mode not defined for components of a <= 1."),
        check_ops.assert_less(
            array_ops.ones((), dtype=self.dtype), self.b,
            message="Mode not defined for components of b <= 1."),
    ], mode)
def _mode(self):
  mode = ((self.alpha - 1.) /
          (array_ops.expand_dims(self.alpha_sum, dim=-1) -
           math_ops.cast(self.event_shape()[0], self.dtype)))
  if self.allow_nan_stats:
    nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
    shape = array_ops.concat(0, (self.batch_shape(), self.event_shape()))
    return math_ops.select(
        math_ops.greater(self.alpha, 1.),
        mode,
        array_ops.fill(shape, nan, name="nan"))
  else:
    return control_flow_ops.with_dependencies([
        check_ops.assert_less(
            array_ops.ones((), dtype=self.dtype), self.alpha,
            message="mode not defined for components of alpha <= 1")
    ], mode)
def _mean(self):
  mean = self.loc * array_ops.ones(self.batch_shape(), dtype=self.dtype)
  if self.allow_nan_stats:
    nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
    return array_ops.where(
        math_ops.greater(
            self.df,
            array_ops.ones(self.batch_shape(), dtype=self.dtype)),
        mean,
        array_ops.fill(self.batch_shape(), nan, name="nan"))
  else:
    return control_flow_ops.with_dependencies(
        [
            check_ops.assert_less(
                array_ops.ones((), dtype=self.dtype), self.df,
                message="mean not defined for components of df <= 1"),
        ], mean)
def _mode(self):
  mode = (self.a - 1.) / (self.a_b_sum - 2.)
  if self.allow_nan_stats:
    nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
    return array_ops.where(
        math_ops.logical_and(
            math_ops.greater(self.a, 1.), math_ops.greater(self.b, 1.)),
        mode,
        array_ops.fill(self.batch_shape(), nan, name="nan"))
  else:
    return control_flow_ops.with_dependencies([
        check_ops.assert_less(
            array_ops.ones((), dtype=self.dtype), self.a,
            message="Mode not defined for components of a <= 1."),
        check_ops.assert_less(
            array_ops.ones((), dtype=self.dtype), self.b,
            message="Mode not defined for components of b <= 1."),
    ], mode)
def _mode(self):
  mode = ((self.alpha - 1.) /
          (array_ops.expand_dims(self.alpha_sum, dim=-1) -
           math_ops.cast(self.event_shape()[0], self.dtype)))
  if self.allow_nan_stats:
    nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
    shape = array_ops.concat((self.batch_shape(), self.event_shape()), 0)
    return array_ops.where(
        math_ops.greater(self.alpha, 1.),
        mode,
        array_ops.fill(shape, nan, name="nan"))
  else:
    return control_flow_ops.with_dependencies([
        check_ops.assert_less(
            array_ops.ones((), dtype=self.dtype), self.alpha,
            message="mode not defined for components of alpha <= 1")
    ], mode)
def testFill(self):
  for dtype in self.numeric_types:
    self._testBinary(
        array_ops.fill,
        np.array([], dtype=np.int32),
        dtype(-42),
        expected=dtype(-42))
    self._testBinary(
        array_ops.fill,
        np.array([1, 2], dtype=np.int32),
        dtype(7),
        expected=np.array([[7, 7]], dtype=dtype))
    self._testBinary(
        array_ops.fill,
        np.array([3, 2], dtype=np.int32),
        dtype(50),
        expected=np.array([[50, 50], [50, 50], [50, 50]], dtype=dtype))

# Helper method used by testMatMul, testSparseMatMul, testBatchMatMul below.
def call(self, inputs, **kwargs):  # pylint: disable=unused-argument
  """This is where the layer's logic lives.

  Arguments:
      inputs: Input tensor, or list/tuple of input tensors.
      **kwargs: Additional keyword arguments.

  Returns:
      A tensor or list/tuple of tensors.
  """
  return inputs

# 0. call tf.keras.Layer.__call__() on input tensor
# 1. tf Layer __call__ is called
# 2. tf.keras.Conv2D.build() is called
# 3. link to tf.layers.Conv2D.build is called; inside, create kernel tensor
#    and bias tensor
# 4. back to tf.keras.Conv2D.build, constraints attributes are added
# 5. tf.layers.convolutional.Conv2D.call is called:
#    - create outputs from nn.convolution()
#    - add bias tensor onto outputs
#    - run activation on outputs
#    back to tf.layers.__call__():
# 6. run _add_inbound_node:
#    - fill in inbound_layers, node_indices, input_tensors, output_tensors
#      based on info from input_tensors
#    - create a node and store this node inside inbound_layer
#    - add _keras_history to output_tensors
# 7. other attributes added
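Since the comment above walks through the Conv2D call chain, a short runnable sketch may help; the layer sizes here are arbitrary, and the prints only confirm that `__call__` triggered `build()` before `call()`:

import tensorflow as tf

# Calling the layer triggers build() (kernel/bias creation), then call().
layer = tf.keras.layers.Conv2D(filters=4, kernel_size=3, activation="relu")
x = tf.random.uniform([1, 8, 8, 3])
y = layer(x)        # __call__ -> build -> call -> node bookkeeping
print(layer.built)  # True: kernel and bias variables now exist
print(y.shape)      # (1, 6, 6, 4)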
def num_relevant(labels, k):
  """Computes number of relevant values for each row in labels.

  For labels with shape [D1, ... DN, num_labels], this is the minimum of
  `num_labels` and `k`.

  Args:
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
      target classes for the associated prediction. Commonly, N=1 and `labels`
      has shape [batch_size, num_labels].
    k: Integer, k for @k metric.

  Returns:
    Integer `Tensor` of shape [D1, ... DN], where each value is the number of
    relevant values for that row.

  Raises:
    ValueError: if inputs have invalid dtypes or values.
  """
  if k < 1:
    raise ValueError('Invalid k=%s.' % k)
  with ops.name_scope(None, 'num_relevant', (labels,)) as scope:
    # For SparseTensor, calculate separate count for each row.
    if isinstance(labels, (ops.SparseTensor, ops.SparseTensorValue)):
      labels_sizes = set_ops.set_size(labels)
      return math_ops.minimum(labels_sizes, k, name=scope)

    # For dense Tensor, calculate scalar count based on last dimension, and
    # tile across labels shape.
    labels_shape = array_ops.shape(labels)
    labels_size = labels_shape[-1]
    num_relevant_scalar = math_ops.minimum(labels_size, k)
    return array_ops.fill(labels_shape[0:-1], num_relevant_scalar, name=scope)
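For intuition: a dense `labels` tensor of shape `[batch_size, num_labels]` yields a `[batch_size]` tensor where every entry is `min(num_labels, k)`; the `fill` simply broadcasts that scalar across the leading dimensions. A sketch of the dense branch with the public API, using made-up values:

import tensorflow as tf

labels = tf.constant([[1, 3, 5], [0, 2, 4]], dtype=tf.int64)  # shape [2, 3]
k = 2

labels_shape = tf.shape(labels)
num_relevant_scalar = tf.minimum(labels_shape[-1], k)  # min(3, 2) = 2
per_row = tf.fill(labels_shape[:-1], num_relevant_scalar)
# per_row == [2, 2]: each row has min(num_labels, k) relevant entries.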
def _select_class_id(ids, selected_id):
  """Filter all but `selected_id` out of `ids`.

  Args:
    ids: `int64` `Tensor` or `SparseTensor` of IDs.
    selected_id: Int id to select.

  Returns:
    `SparseTensor` of same dimensions as `ids`, except for the last dimension,
    which might be smaller. This contains only the entries equal to
    `selected_id`.
  """
  if isinstance(ids, (ops.SparseTensor, ops.SparseTensorValue)):
    return sparse_ops.sparse_retain(
        ids, math_ops.equal(ids.values, selected_id))

  # TODO(ptucker): Make this more efficient, maybe add a sparse version of
  # tf.equal and tf.reduce_any?

  # Shape of filled IDs is the same as `ids` with the last dim collapsed to 1.
  ids_shape = array_ops.shape(ids)
  ids_last_dim = array_ops.size(ids_shape) - 1
  filled_selected_id_shape = math_ops.reduced_shape(
      ids_shape, array_ops.reshape(ids_last_dim, [1]))

  # Intersect `ids` with the selected ID.
  filled_selected_id = array_ops.fill(
      filled_selected_id_shape, math_ops.to_int64(selected_id))
  return set_ops.set_intersection(filled_selected_id, ids)
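The dense branch builds a tensor with the same leading shape as `ids` but last dimension 1, fills it with `selected_id`, and intersects it row-wise with `ids`. A small sketch of that fill-and-intersect step using the public `tf.sets` API (named `tf.sets.set_intersection` in older TF 1.x releases), with illustrative inputs:

import tensorflow as tf

ids = tf.constant([[1, 2, 3], [3, 4, 5]], dtype=tf.int64)
selected_id = tf.constant(3, dtype=tf.int64)

# Same shape as `ids` with the last dim collapsed to 1: here [2, 1].
filled_shape = tf.concat([tf.shape(ids)[:-1], [1]], axis=0)
filled = tf.fill(filled_shape, selected_id)

# Row-wise set intersection keeps only the entries equal to selected_id.
result = tf.sets.intersection(filled, ids)  # SparseTensor, values == [3, 3]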
def _mean(self):
  mean = self.beta / (self.alpha - 1.)
  if self.allow_nan_stats:
    nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
    return math_ops.select(
        self.alpha > 1., mean,
        array_ops.fill(self.batch_shape(), nan, name="nan"))
  else:
    return control_flow_ops.with_dependencies([
        check_ops.assert_less(
            array_ops.ones((), self.dtype), self.alpha,
            message="mean not defined for components of self.alpha <= 1"),
    ], mean)
def _variance(self):
  var = (math_ops.square(self.beta) /
         (math_ops.square(self.alpha - 1.) * (self.alpha - 2.)))
  if self.allow_nan_stats:
    nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
    return math_ops.select(
        self.alpha > 2., var,
        array_ops.fill(self.batch_shape(), nan, name="nan"))
  else:
    return control_flow_ops.with_dependencies([
        check_ops.assert_less(
            constant_op.constant(2., dtype=self.dtype), self.alpha,
            message="variance not defined for components of alpha <= 2"),
    ], var)
def _mode(self):
  mode = (self.alpha - 1.) / self.beta
  if self.allow_nan_stats:
    nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
    return math_ops.select(
        self.alpha >= 1., mode,
        array_ops.fill(self.batch_shape(), nan, name="nan"))
  else:
    return control_flow_ops.with_dependencies([
        check_ops.assert_less(
            array_ops.ones((), self.dtype), self.alpha,
            message="mode not defined for components of alpha <= 1"),
    ], mode)
def _select_class_id(ids, selected_id):
  """Filter all but `selected_id` out of `ids`.

  Args:
    ids: `int64` `Tensor` or `SparseTensor` of IDs.
    selected_id: Int id to select.

  Returns:
    `SparseTensor` of same dimensions as `ids`. This contains only the entries
    equal to `selected_id`.
  """
  if isinstance(
      ids, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
    return sparse_ops.sparse_retain(
        ids, math_ops.equal(ids.values, selected_id))

  # TODO(ptucker): Make this more efficient, maybe add a sparse version of
  # tf.equal and tf.reduce_any?

  # Shape of filled IDs is the same as `ids` with the last dim collapsed to 1.
  ids_shape = array_ops.shape(ids, out_type=dtypes.int64)
  ids_last_dim = array_ops.size(ids_shape) - 1
  filled_selected_id_shape = math_ops.reduced_shape(
      ids_shape, array_ops.reshape(ids_last_dim, [1]))

  # Intersect `ids` with the selected ID.
  filled_selected_id = array_ops.fill(
      filled_selected_id_shape, math_ops.to_int64(selected_id))
  result = set_ops.set_intersection(filled_selected_id, ids)
  return sparse_tensor.SparseTensor(
      indices=result.indices, values=result.values, shape=ids_shape)
def _mean(self):
  mean = self.mu * self._ones()
  if self.allow_nan_stats:
    nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
    return math_ops.select(
        math_ops.greater(self.df, self._ones()),
        mean,
        array_ops.fill(self.batch_shape(), nan, name="nan"))
  else:
    return control_flow_ops.with_dependencies([
        check_ops.assert_less(
            array_ops.ones((), dtype=self.dtype), self.df,
            message="mean not defined for components of df <= 1"),
    ], mean)
def get_init_tokens_op(self, num_tokens=-1):
  """Returns the op to fill the sync_token_queue with the tokens.

  This is supposed to be executed in the beginning of the chief/sync thread
  so that even if the total_num_replicas is less than replicas_to_aggregate,
  the model can still proceed as the replicas can compute multiple steps per
  variable update. Make sure:
  `num_tokens >= replicas_to_aggregate - total_num_replicas`.

  Args:
    num_tokens: Number of tokens to add to the queue.

  Returns:
    An op for the chief/sync replica to fill the token queue.

  Raises:
    ValueError: If this is called before apply_gradients().
    ValueError: If num_tokens are smaller than replicas_to_aggregate -
      total_num_replicas.
  """
  if self._gradients_applied is False:
    raise ValueError(
        "get_init_tokens_op() should be called after apply_gradients().")

  tokens_needed = self._total_num_replicas
  if num_tokens == -1:
    num_tokens = self._total_num_replicas
  elif num_tokens < tokens_needed:
    raise ValueError(
        "Too few tokens to finish the first step: %d (given) vs %d (needed)" %
        (num_tokens, tokens_needed))

  init_tokens = []
  with ops.device(self._global_step.device), ops.name_scope(""):
    tokens = array_ops.fill([num_tokens], self._global_step)
    for i in range(self._total_num_replicas):
      with ops.control_dependencies([logging_ops.Print(
          self._global_step, [self._global_step],
          message="Init token queue")]):
        init_tokens_op = self._sync_token_queues[i].enqueue(self._global_step)
      init_tokens.append(init_tokens_op)

  return init_tokens
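The `fill` use here builds a 1-D vector of `num_tokens` copies of the current global step, which the chief enqueues as permission tokens for worker replicas. A tiny sketch of just that vector construction (queue wiring omitted; the values are illustrative):

import tensorflow as tf

global_step = tf.constant(7, dtype=tf.int64)  # illustrative step value
num_tokens = 4

# One token per pending step: [7, 7, 7, 7].
tokens = tf.fill([num_tokens], global_step)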
def num_relevant(labels, k):
  """Computes number of relevant values for each row in labels.

  For labels with shape [D1, ... DN, num_labels], this is the minimum of
  `num_labels` and `k`.

  Args:
    labels: `int64` `Tensor` or `SparseTensor` with shape
      [D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
      target classes for the associated prediction. Commonly, N=1 and `labels`
      has shape [batch_size, num_labels].
    k: Integer, k for @k metric.

  Returns:
    Integer `Tensor` of shape [D1, ... DN], where each value is the number of
    relevant values for that row.

  Raises:
    ValueError: if inputs have invalid dtypes or values.
  """
  if k < 1:
    raise ValueError('Invalid k=%s.' % k)
  with ops.name_scope(None, 'num_relevant', (labels,)) as scope:
    # For SparseTensor, calculate separate count for each row.
    if isinstance(
        labels, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
      labels_sizes = set_ops.set_size(labels)
      return math_ops.minimum(labels_sizes, k, name=scope)

    # For dense Tensor, calculate scalar count based on last dimension, and
    # tile across labels shape.
    labels_shape = array_ops.shape(labels)
    labels_size = labels_shape[-1]
    num_relevant_scalar = math_ops.minimum(labels_size, k)
    return array_ops.fill(labels_shape[0:-1], num_relevant_scalar, name=scope)
def _select_class_id(ids, selected_id):
  """Filter all but `selected_id` out of `ids`.

  Args:
    ids: `int64` `Tensor` or `SparseTensor` of IDs.
    selected_id: Int id to select.

  Returns:
    `SparseTensor` of same dimensions as `ids`. This contains only the entries
    equal to `selected_id`.
  """
  if isinstance(
      ids, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
    return sparse_ops.sparse_retain(
        ids, math_ops.equal(ids.values, selected_id))

  # TODO(ptucker): Make this more efficient, maybe add a sparse version of
  # tf.equal and tf.reduce_any?

  # Shape of filled IDs is the same as `ids` with the last dim collapsed to 1.
  ids_shape = array_ops.shape(ids, out_type=dtypes.int64)
  ids_last_dim = array_ops.size(ids_shape) - 1
  filled_selected_id_shape = math_ops.reduced_shape(
      ids_shape, array_ops.reshape(ids_last_dim, [1]))

  # Intersect `ids` with the selected ID.
  filled_selected_id = array_ops.fill(
      filled_selected_id_shape, math_ops.to_int64(selected_id))
  result = set_ops.set_intersection(filled_selected_id, ids)
  return sparse_tensor.SparseTensor(
      indices=result.indices, values=result.values, dense_shape=ids_shape)
def _mean(self):
  mean = self.beta / (self.alpha - 1.)
  if self.allow_nan_stats:
    nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
    return array_ops.where(
        self.alpha > 1., mean,
        array_ops.fill(self.batch_shape(), nan, name="nan"))
  else:
    return control_flow_ops.with_dependencies([
        check_ops.assert_less(
            array_ops.ones((), self.dtype), self.alpha,
            message="mean not defined for components of self.alpha <= 1"),
    ], mean)
def _variance(self):
  var = (math_ops.square(self.beta) /
         (math_ops.square(self.alpha - 1.) * (self.alpha - 2.)))
  if self.allow_nan_stats:
    nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
    return array_ops.where(
        self.alpha > 2., var,
        array_ops.fill(self.batch_shape(), nan, name="nan"))
  else:
    return control_flow_ops.with_dependencies([
        check_ops.assert_less(
            constant_op.constant(2., dtype=self.dtype), self.alpha,
            message="variance not defined for components of alpha <= 2"),
    ], var)
def _mode(self):
  mode = (self.alpha - 1.) / self.beta
  if self.allow_nan_stats:
    nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
    return array_ops.where(
        self.alpha >= 1., mode,
        array_ops.fill(self.batch_shape(), nan, name="nan"))
  else:
    return control_flow_ops.with_dependencies([
        check_ops.assert_less(
            array_ops.ones((), self.dtype), self.alpha,
            message="mode not defined for components of alpha <= 1"),
    ], mode)
def ndlstm_base_dynamic(inputs, noutput, scope=None, reverse=False):
  """Run an LSTM, either forward or backward.

  This is a 1D LSTM implementation using dynamic_rnn and
  the TensorFlow LSTM op.

  Args:
    inputs: input sequence (length, batch_size, ninput)
    noutput: depth of output
    scope: optional scope name
    reverse: run LSTM in reverse

  Returns:
    Output sequence (length, batch_size, noutput)
  """
  with variable_scope.variable_scope(scope, "SeqLstm", [inputs]):
    # TODO(tmb) make batch size, sequence_length dynamic
    # example: sequence_length = tf.shape(inputs)[0]
    _, batch_size, _ = _shape(inputs)
    lstm_cell = core_rnn_cell_impl.BasicLSTMCell(noutput, state_is_tuple=False)
    state = array_ops.zeros([batch_size, lstm_cell.state_size])
    sequence_length = int(inputs.get_shape()[0])
    sequence_lengths = math_ops.to_int64(
        array_ops.fill([batch_size], sequence_length))
    if reverse:
      inputs = array_ops.reverse_v2(inputs, [0])
    outputs, _ = rnn.dynamic_rnn(
        lstm_cell, inputs, sequence_lengths, state, time_major=True)
    if reverse:
      outputs = array_ops.reverse_v2(outputs, [0])
    return outputs
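Here `fill` turns one static sequence length into the per-batch-item length vector that `dynamic_rnn` expects (every example in the batch shares the same length). A sketch of that step with current public ops, using arbitrary sizes:

import tensorflow as tf

batch_size = 8
sequence_length = 20

# dynamic_rnn wants one length per batch element; all are equal here.
sequence_lengths = tf.cast(
    tf.fill([batch_size], sequence_length), tf.int64)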
def testLSTMCell(self):
  # Run with all-0 weights, no padding.
  m, c = self._RunLSTMCell('zeros', init_ops.zeros_initializer(), 0., 0., 0.)
  self.assertAllClose(m, [[0.]] * self._batch_size)
  self.assertAllClose(c, [[0.]] * self._batch_size)
  m, c = self._RunLSTMCell('zeros', init_ops.zeros_initializer(), 0., 1., 0.)
  self.assertAllClose(m, [[.25]] * self._batch_size)
  self.assertAllClose(c, [[.5]] * self._batch_size)
  m, c = self._RunLSTMCell('zeros', init_ops.zeros_initializer(), 1., 0., 0.)
  self.assertAllClose(m, [[.0]] * self._batch_size)
  self.assertAllClose(c, [[.0]] * self._batch_size)
  m, c = self._RunLSTMCell('zeros', init_ops.zeros_initializer(), 1., 1., 0.)
  self.assertAllClose(m, [[.25]] * self._batch_size)
  self.assertAllClose(c, [[.5]] * self._batch_size)

  # Run with all-1 weights, no padding.
  for m_prev in [0., 1.]:
    for c_prev in [0., 1.]:
      m, c = self._RunLSTMCell('ones', init_ops.ones_initializer(),
                               m_prev, c_prev, 0.)
      self.assertAllClose(m, self._NextM(self._inputs, 1., m_prev, c_prev))
      self.assertAllClose(c, self._NextC(self._inputs, 1., m_prev, c_prev))

  # Run with random weights.
  for weight in np.random.rand(3):
    weight_tf = constant_op.constant(weight, dtypes.float32)
    random_weight = lambda shape, w=weight_tf: array_ops.fill(shape, w)

    # No padding.
    for m_prev in [0., 1.]:
      for c_prev in [0., 1.]:
        m, c = self._RunLSTMCell('random', random_weight, m_prev, c_prev, 0.)
        self.assertAllClose(m,
                            self._NextM(self._inputs, weight, m_prev, c_prev))
        self.assertAllClose(c,
                            self._NextC(self._inputs, weight, m_prev, c_prev))

    # Set padding.
    for m_prev in [0., 1.]:
      for c_prev in [0., 1.]:
        m, c = self._RunLSTMCell('random', random_weight, m_prev, c_prev, 1.)
        self.assertAllClose(m, [[m_prev]] * self._batch_size)
        self.assertAllClose(c, [[c_prev]] * self._batch_size)
def testConstZeroElementOutput(self):
  """Test consisting of a constant zero element return value."""

  def ConstZeroElementOutput():
    return array_ops.fill([7, 0], 3.0)

  self._compare(ConstZeroElementOutput, [], require_kernel_launch=False)
def testMandatoryConstantInput(self):
  """Tests an operator that has a mandatory-constant shape input."""

  def FillWithFloat(x):
    return array_ops.fill(x, 9.5)

  self._compare(FillWithFloat, [np.array([3, 2], dtype=np.int32)])
def ctc_label_dense_to_sparse(labels, label_lengths):
  """Converts CTC labels from dense to sparse.

  Arguments:
      labels: dense CTC labels.
      label_lengths: length of the labels.

  Returns:
      A sparse tensor representation of the labels.
  """
  label_shape = array_ops.shape(labels)
  num_batches_tns = array_ops.stack([label_shape[0]])
  max_num_labels_tns = array_ops.stack([label_shape[1]])

  def range_less_than(_, current_input):
    return array_ops.expand_dims(
        math_ops.range(label_shape[1]), 0) < array_ops.fill(
            max_num_labels_tns, current_input)

  init = math_ops.cast(
      array_ops.fill([1, label_shape[1]], 0), dtypes_module.bool)
  dense_mask = functional_ops.scan(
      range_less_than, label_lengths, initializer=init, parallel_iterations=1)
  dense_mask = dense_mask[:, 0, :]

  label_array = array_ops.reshape(
      array_ops.tile(math_ops.range(0, label_shape[1]), num_batches_tns),
      label_shape)
  label_ind = array_ops.boolean_mask(label_array, dense_mask)

  batch_array = array_ops.transpose(
      array_ops.reshape(
          array_ops.tile(math_ops.range(0, label_shape[0]),
                         max_num_labels_tns), reverse(label_shape, 0)))
  batch_ind = array_ops.boolean_mask(batch_array, dense_mask)
  indices = array_ops.transpose(
      array_ops.reshape(concatenate([batch_ind, label_ind], axis=0), [2, -1]))

  vals_sparse = array_ops.gather_nd(labels, indices)

  return sparse_tensor.SparseTensor(
      math_ops.to_int64(indices), vals_sparse, math_ops.to_int64(label_shape))
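This helper is available publicly as `tf.keras.backend.ctc_label_dense_to_sparse` (`reverse` and `concatenate` above are sibling functions in the same Keras backend module). A usage sketch with made-up labels, where entries beyond each row's length are dropped from the sparse result:

import tensorflow as tf

labels = tf.constant([[1, 2, 0], [3, 0, 0]], dtype=tf.int32)
label_lengths = tf.constant([2, 1], dtype=tf.int32)

sparse = tf.keras.backend.ctc_label_dense_to_sparse(labels, label_lengths)
# sparse.values == [1, 2, 3]; padding past each row's length is dropped.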
def _padding(sequences, num_unroll):
  """For a dictionary of sequences, pads tensors to a multiple of `num_unroll`.

  Args:
    sequences: dictionary with `Tensor` values.
    num_unroll: int specifying to what multiple to pad sequences to.

  Returns:
    length: Scalar `Tensor` of dimension 0 of all the values in sequences.
    padded_sequence: Dictionary of sequences that are padded to a multiple of
      `num_unroll`.

  Raises:
    ValueError: If `num_unroll` is not an int.
    TypeError: If `sequences` is not a dictionary from string to `Tensor`.
  """
  if not isinstance(num_unroll, numbers.Integral):
    raise ValueError("Unsupported num_unroll expected int, got: %s" %
                     str(num_unroll))
  if not isinstance(sequences, dict):
    raise TypeError("Unsupported sequences expected dict, got: %s" %
                    str(sequences))
  for key, value in sequences.items():
    if not isinstance(key, six.string_types):
      raise TypeError("Unsupported sequences key expected string, got: %s" %
                      str(key))
  if not sequences:
    return 0, {}

  sequences_dict = {}
  for key, value in sequences.items():
    sequences_dict[key] = ops.convert_to_tensor(value)

  lengths = [array_ops.shape(value)[0] for value in sequences_dict.values()]
  length = lengths[0]
  all_lengths_equal = [
      control_flow_ops.Assert(
          math_ops.equal(l, length),
          [string_ops.string_join(
              ["All sequence lengths must match, but received lengths: ",
               string_ops.as_string(lengths)])])
      for l in lengths]
  length = control_flow_ops.with_dependencies(all_lengths_equal, length)

  unroll = array_ops.constant(num_unroll)
  padded_length = length + ((unroll - (length % unroll)) % unroll)

  padded_sequences = {}
  for key, value in sequences_dict.items():
    # 1. create shape of paddings
    # first dimension of value will be increased by num_paddings to
    # padded_length
    num_paddings = [padded_length - array_ops.shape(value)[0]]
    # the shape of the paddings that we concat with the original value will be
    # [num_paddings, tf.shape(value)[1], tf.shape(value)[2], ...,
    #  tf.shape(value)[tf.rank(value) - 1])]
    padding_shape = array_ops.concat(0, (
        num_paddings, array_ops.shape(value)[1:]))

    # 2. fill padding shape with dummies
    dummy = array_ops.constant(
        "" if value.dtype == dtypes.string else 0, dtype=value.dtype)
    paddings = array_ops.fill(dims=padding_shape, value=dummy)

    # 3. concat values with paddings
    padded_sequences[key] = array_ops.concat(0, [value, paddings])
  return length, padded_sequences
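The padding arithmetic is `padded_length = length + ((num_unroll - length % num_unroll) % num_unroll)`, i.e. round up to the next multiple of `num_unroll`; the outer `% num_unroll` makes already-aligned lengths pad by zero. A sketch of steps 1-3 for a single tensor with the public API, using arbitrary sizes:

import tensorflow as tf

value = tf.ones([5, 3])  # a sequence of length 5
num_unroll = 4

length = tf.shape(value)[0]
padded_length = length + ((num_unroll - length % num_unroll) % num_unroll)  # 8

# Pad only the first dimension; keep trailing dims from the original value.
padding_shape = tf.concat([[padded_length - length], tf.shape(value)[1:]], 0)
paddings = tf.fill(padding_shape, 0.0)    # dummy value matching the dtype
padded = tf.concat([value, paddings], 0)  # shape [8, 3]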