The following code examples, extracted from open-source Python projects, illustrate how to use tensorflow.python.ops.array_ops.unstack().
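Before the project examples, here is a minimal standalone sketch of what the op does (the tensor values and variable names are illustrative only, not taken from any of the projects below): unstack splits a tensor into a list of tensors along the chosen axis.

import tensorflow as tf
from tensorflow.python.ops import array_ops

x = tf.constant([[1., 2.], [3., 4.], [5., 6.]])  # shape [3, 2]
rows = array_ops.unstack(x)           # 3 tensors, each of shape [2] (axis 0 by default)
cols = array_ops.unstack(x, axis=1)   # 2 tensors, each of shape [3]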
def _ImageDimensions(images, dynamic_shape=False):
  """Returns the dimensions of an image tensor.

  Args:
    images: 4-D Tensor of shape [batch, height, width, channels]
    dynamic_shape: Whether the input image has undetermined shape. If set to
      `True`, shape information will be retrieved at run time. Default to
      `False`.

  Returns:
    list of integers [batch, height, width, channels]
  """
  # A simple abstraction to provide names for each dimension. This abstraction
  # should make it simpler to switch dimensions in the future (e.g. if we ever
  # want to switch height and width.)
  if dynamic_shape:
    return array_ops.unstack(array_ops.shape(images))
  else:
    return images.get_shape().as_list()
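A quick usage sketch of the dynamic path above (the placeholder shape is made up for illustration and assumes the function above in a TF 1.x-style graph):

import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 224, 224, 3])
# With dynamic_shape=False this would return [None, 224, 224, 3];
# with dynamic_shape=True each dimension is a scalar int32 tensor
# resolved at run time, including the unknown batch size.
batch, height, width, channels = _ImageDimensions(images, dynamic_shape=True)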
def _ImageDimensions(image):
  """Returns the dimensions of an image tensor.

  Args:
    image: A 3-D Tensor of shape `[height, width, channels]`.

  Returns:
    A list of `[height, width, channels]` corresponding to the dimensions of
    the input image. Dimensions that are statically known are python integers,
    otherwise they are integer scalar tensors.
  """
  if image.get_shape().is_fully_defined():
    return image.get_shape().as_list()
  else:
    static_shape = image.get_shape().with_rank(3).as_list()
    dynamic_shape = array_ops.unstack(array_ops.shape(image), 3)
    return [s if s is not None else d
            for s, d in zip(static_shape, dynamic_shape)]
def testUnpack(self):
  self._testUnary(
      array_ops.unstack,
      np.array([[1., 2.], [3., 4.], [5., 6.]], dtype=np.float32),
      expected=[
          np.array([1., 2.], dtype=np.float32),
          np.array([3., 4.], dtype=np.float32),
          np.array([5., 6.], dtype=np.float32),
      ],
      equality_test=self.ListsAreClose)

  self._testUnary(
      lambda x: array_ops.unstack(x, axis=1),
      np.array([[1., 2.], [3., 4.], [5., 6.]], dtype=np.float32),
      expected=[
          np.array([1., 3., 5.], dtype=np.float32),
          np.array([2., 4., 6.], dtype=np.float32),
      ],
      equality_test=self.ListsAreClose)
def unpack(labeled_tensor, axis_name=None, name=None):
  """Unpack the tensor.

  See tf.unpack.

  Args:
    labeled_tensor: The input tensor.
    axis_name: Optional name of axis to unpack. By default, the first axis is
      used.
    name: Optional op name.

  Returns:
    The list of unpacked LabeledTensors.

  Raises:
    ValueError: If `axis_name` is not an axis on the input.
  """
  with ops.name_scope(name, 'lt_unpack', [labeled_tensor]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)

    axis_names = list(labeled_tensor.axes.keys())
    if axis_name is None:
      axis_name = axis_names[0]

    if axis_name not in axis_names:
      raise ValueError('%s not in %s' % (axis_name, axis_names))
    axis = axis_names.index(axis_name)

    unpack_ops = array_ops.unstack(labeled_tensor.tensor, axis=axis, name=scope)
    axes = [a for i, a in enumerate(labeled_tensor.axes.values()) if i != axis]
    return [core.LabeledTensor(t, axes) for t in unpack_ops]
def seq2seq_inputs(x, y, input_length, output_length, sentinel=None, name=None):
  """Processes inputs for Sequence to Sequence models.

  Args:
    x: Input Tensor [batch_size, input_length, embed_dim].
    y: Output Tensor [batch_size, output_length, embed_dim].
    input_length: length of input x.
    output_length: length of output y.
    sentinel: optional first input to decoder and final output expected.
      If sentinel is not provided, zeros are used. Due to fact that y is not
      available in sampling time, shape of sentinel will be inferred from x.
    name: Operation name.

  Returns:
    Encoder input from x, and decoder inputs and outputs from y.
  """
  with ops.name_scope(name, "seq2seq_inputs", [x, y]):
    in_x = array_ops.unstack(x, axis=1)
    y = array_ops.unstack(y, axis=1)
    if not sentinel:
      # Set to zeros of shape of y[0], using x for batch size.
      sentinel_shape = array_ops.stack(
          [array_ops.shape(x)[0], y[0].get_shape()[1]])
      sentinel = array_ops.zeros(sentinel_shape)
      sentinel.set_shape(y[0].get_shape())
    in_y = [sentinel] + y
    out_y = y + [sentinel]
    return in_x, in_y, out_y
def _prepare_inputs_for_rnn(sequence_features, context_features, num_unroll):
  """Prepares features batched by the SQSS for input to a state-saving RNN.

  Args:
    sequence_features: A dict of sequence feature name to `Tensor`, with
      tensors of shape `[batch_size, num_unroll, ...]` and type float32.
    context_features: A dict of context feature name to `Tensor`, with
      tensors of shape `[batch_size, 1, ...]` and type float32.
    num_unroll: Python integer, how many time steps to unroll at a time.
      The input sequences of length `k` are then split into `k / num_unroll`
      many segments.

  Returns:
    features_by_time: A list of length `num_unroll` with `Tensor` entries of
      shape `[batch_size, len(sequence_features) + len(context_features)]` of
      type float32. Features are stored in lexicographic order by their
      corresponding feature dict keys, first in the `sequence_features` and
      then in the `context_features` dicts. Context features are copied into
      each time step.
  """

  def _tile(feature):
    return array_ops.squeeze(
        array_ops.tile(array_ops.expand_dims(feature, 1), [1, num_unroll, 1]),
        axis=2)

  sequence_features = [sequence_features[k] for k in sorted(sequence_features)]
  if not context_features:
    return array_ops.unstack(array_ops.stack(sequence_features, 2), axis=1)
  context_features = [
      _tile(context_features[k]) for k in sorted(context_features)
  ]
  return array_ops.unstack(
      array_ops.stack(sequence_features + context_features, 2), axis=1)
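The stack-then-unstack pattern above is easier to follow with concrete, made-up shapes: each sequence feature is [batch_size, num_unroll], stacking along axis 2 puts the features side by side, and unstacking along axis 1 yields one [batch_size, num_features] tensor per unrolled time step.

import tensorflow as tf
from tensorflow.python.ops import array_ops

a = tf.zeros([4, 3])                        # feature "a": [batch_size=4, num_unroll=3]
b = tf.zeros([4, 3])                        # feature "b": same shape
stacked = array_ops.stack([a, b], 2)        # shape [4, 3, 2]
steps = array_ops.unstack(stacked, axis=1)  # list of 3 tensors, each [4, 2]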
def test(self):
  unpack_lt = ops.unpack(self.original_lt)[0]
  golden_lt = core.LabeledTensor(
      array_ops.unstack(self.original_lt.tensor)[0],
      [self.a1, self.a2, self.a3])

  self.assertLabeledTensorsEqual(unpack_lt, golden_lt)
def _cat_probs(self, log_probs):
  """Get a list of num_components batchwise probabilities."""
  which_softmax = nn_ops.log_softmax if log_probs else nn_ops.softmax
  cat_probs = which_softmax(self.cat.logits)
  cat_probs = array_ops.unstack(cat_probs, num=self.num_components, axis=-1)
  return cat_probs
def ndlstm_base_unrolled(inputs, noutput, scope=None, reverse=False):
  """Run an LSTM, either forward or backward.

  This is a 1D LSTM implementation using unrolling and the TensorFlow
  LSTM op.

  Args:
    inputs: input sequence (length, batch_size, ninput)
    noutput: depth of output
    scope: optional scope name
    reverse: run LSTM in reverse

  Returns:
    Output sequence (length, batch_size, noutput)
  """
  with variable_scope.variable_scope(scope, "SeqLstmUnrolled", [inputs]):
    length, batch_size, _ = _shape(inputs)
    lstm_cell = core_rnn_cell_impl.BasicLSTMCell(noutput, state_is_tuple=False)
    state = array_ops.zeros([batch_size, lstm_cell.state_size])
    output_u = []
    inputs_u = array_ops.unstack(inputs)
    if reverse:
      inputs_u = list(reversed(inputs_u))
    for i in xrange(length):
      if i > 0:
        variable_scope.get_variable_scope().reuse_variables()
      output, state = lstm_cell(inputs_u[i], state)
      output_u += [output]
    if reverse:
      output_u = list(reversed(output_u))
    outputs = array_ops.stack(output_u)
    return outputs
def sequence_to_final(inputs, noutput, scope=None, name=None, reverse=False):
  """Run an LSTM across all steps and returns only the final state.

  Args:
    inputs: (length, batch_size, depth) tensor
    noutput: size of output vector
    scope: optional scope name
    name: optional name for output tensor
    reverse: run in reverse

  Returns:
    Batch of size (batch_size, noutput).
  """
  with variable_scope.variable_scope(scope, "SequenceToFinal", [inputs]):
    length, batch_size, _ = _shape(inputs)
    lstm = core_rnn_cell_impl.BasicLSTMCell(noutput, state_is_tuple=False)
    state = array_ops.zeros([batch_size, lstm.state_size])
    inputs_u = array_ops.unstack(inputs)
    if reverse:
      inputs_u = list(reversed(inputs_u))
    for i in xrange(length):
      if i > 0:
        variable_scope.get_variable_scope().reuse_variables()
      output, state = lstm(inputs_u[i], state)
    outputs = array_ops.reshape(output, [batch_size, noutput], name=name)
    return outputs
def sequence_softmax(inputs, noutput, scope=None, name=None, linear_name=None):
  """Run a softmax layer over all the time steps of an input sequence.

  Args:
    inputs: (length, batch_size, depth) tensor
    noutput: output depth
    scope: optional scope name
    name: optional name for output tensor
    linear_name: name for linear (pre-softmax) output

  Returns:
    A tensor of size (length, batch_size, noutput).
  """
  length, _, ninputs = _shape(inputs)
  inputs_u = array_ops.unstack(inputs)
  output_u = []
  with variable_scope.variable_scope(scope, "SequenceSoftmax", [inputs]):
    initial_w = random_ops.truncated_normal([0 + ninputs, noutput], stddev=0.1)
    initial_b = constant_op.constant(0.1, shape=[noutput])
    w = variables.model_variable("weights", initializer=initial_w)
    b = variables.model_variable("biases", initializer=initial_b)
    for i in xrange(length):
      with variable_scope.variable_scope(scope, "SequenceSoftmaxStep",
                                         [inputs_u[i]]):
        # TODO(tmb) consider using slim.fully_connected(...,
        # activation_fn=tf.nn.softmax)
        linear = nn_ops.xw_plus_b(inputs_u[i], w, b, name=linear_name)
        output = nn_ops.softmax(linear)
        output_u += [output]
    outputs = array_ops.stack(output_u, name=name)
    return outputs
def dense_to_sparse_tensor(dense_tensor, ignore_value=None):
  """Converts a dense Tensor to a SparseTensor, dropping ignore_value cells.

  Args:
    dense_tensor: A `Tensor`.
    ignore_value: Entries in `dense_tensor` equal to this value will be
      absent from the return `SparseTensor`. If `None`, default value of
      dense_tensor's dtype will be used (e.g. '' for `str`, 0 for `int`).

  Returns:
    A `SparseTensor` with the same shape as `dense_tensor`.

  Raises:
    ValueError: when `dense_tensor`'s rank is `None`.
  """
  with ops.name_scope("DenseToSparseTensor"):
    dense_t = ops.convert_to_tensor(dense_tensor)
    if dense_t.get_shape().ndims is None:
      # TODO(b/32318825): Implement dense_to_sparse_tensor for undefined rank.
      raise ValueError("dense_tensor.get_shape() should be defined, got None.")
    if ignore_value is None:
      if dense_t.dtype == dtypes.string:
        # Exception due to TF strings are converted to numpy objects by default.
        ignore_value = ""
      else:
        ignore_value = dense_t.dtype.as_numpy_dtype()
    dense_shape = math_ops.cast(array_ops.shape(dense_t), dtypes.int64)
    indices = array_ops.where(
        math_ops.not_equal(dense_t, math_ops.cast(ignore_value, dense_t.dtype)))
    index_dims = len(dense_t.get_shape())
    # Flattens the tensor and indices for use with gather.
    flat_tensor = array_ops.reshape(dense_t, [-1])
    flat_indices = indices[:, index_dims - 1]
    # Computes the correct flattened indices for 2d (or higher) tensors.
    if index_dims > 1:
      higher_dims = indices[:, :index_dims - 1]
      shape_multipliers = array_ops.stack(
          _multiplier_helper(array_ops.unstack(dense_shape)[1:]))
      offsets = math_ops.reduce_sum(
          math_ops.multiply(higher_dims, shape_multipliers),
          reduction_indices=[1])
      flat_indices = math_ops.add(flat_indices, offsets)
    values = array_ops.gather(flat_tensor, flat_indices)
    return sparse_tensor.SparseTensor(indices, values, dense_shape)