The following 28 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.python.ops.array_ops.unpack().
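As background for the examples below: `array_ops.unpack(value, num=None, axis=0)` splits a rank-R tensor into a list of rank-(R-1) tensors along `axis`, and `array_ops.pack` is its inverse (the pair was later renamed `tf.unstack`/`tf.stack`). Here is a minimal NumPy sketch of the same semantics, for illustration only; `unpack_like` and `pack_like` are hypothetical stand-ins, not TensorFlow APIs.

import numpy as np

def unpack_like(value, axis=0):
  # Mirror of array_ops.unpack: split a rank-R array into a list of
  # rank-(R-1) arrays along `axis`.
  return [np.squeeze(s, axis=axis)
          for s in np.split(value, value.shape[axis], axis=axis)]

def pack_like(values, axis=0):
  # Mirror of array_ops.pack: stack a list of rank-(R-1) arrays back
  # into a single rank-R array.
  return np.stack(values, axis=axis)

x = np.arange(24).reshape(2, 3, 4)
parts = unpack_like(x, axis=1)                      # 3 arrays of shape (2, 4)
assert all(p.shape == (2, 4) for p in parts)
assert np.array_equal(pack_like(parts, axis=1), x)
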
def _ImageDimensions(images, dynamic_shape=False):
  """Returns the dimensions of an image tensor.

  Args:
    images: 4-D Tensor of shape [batch, height, width, channels]
    dynamic_shape: Whether the input image has undetermined shape. If set to
      `True`, shape information will be retrieved at run time. Defaults to
      `False`.

  Returns:
    list of integers [batch, height, width, channels]
  """
  # A simple abstraction to provide names for each dimension. This abstraction
  # should make it simpler to switch dimensions in the future (e.g. if we ever
  # want to switch height and width.)
  if dynamic_shape:
    return array_ops.unpack(array_ops.shape(images))
  else:
    return images.get_shape().as_list()

def _reverse_seq(input_seq, lengths):
  """Reverse a list of Tensors up to specified lengths.

  Args:
    input_seq: Sequence of seq_len tensors of dimension (batch_size, depth)
    lengths: A tensor of dimension batch_size, containing lengths for each
      sequence in the batch. If "None" is specified, simply reverses the list.

  Returns:
    time-reversed sequence
  """
  for input_ in input_seq:
    input_.set_shape(input_.get_shape().with_rank(2))

  # Join into (time, batch_size, depth)
  s_joined = array_ops_.pack(input_seq)
  # Reverse along dimension 0
  s_reversed = array_ops_.reverse_sequence(s_joined, lengths, 0, 1)
  # Split again into list
  result = array_ops_.unpack(s_reversed)
  return result

def _sample_n(self, n, seed=None):
  # We use 2 uniform random floats to generate polar random variates.
  # http://dl.acm.org/citation.cfm?id=179631
  # Theorem 2. Let G, H be iid variates, uniformly distributed on [0,1].
  # Let theta = 2*pi*H, let R = sqrt(df*(G^(-2/df) - 1)) for df > 0.
  # Let X = R*cos(theta), and let Y = R*sin(theta).
  # Then X ~ t_df and Y ~ t_df.
  # The variates X and Y are not independent.
  shape = array_ops.concat(0, ([2, n], self.batch_shape()))
  uniform = random_ops.random_uniform(shape=shape, dtype=self.dtype, seed=seed)
  samples_g, samples_h = array_ops.unpack(uniform, num=2)
  theta = (2. * math.pi) * samples_h
  r = math_ops.sqrt(self.df * (math_ops.pow(samples_g, -2 / self.df) - 1))
  samples = r * math_ops.cos(theta)
  return samples * self.sigma + self.mu

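The sampler above is a Tensor-level implementation of the polar method cited in its comment. The following NumPy sketch restates the same recipe only to make the formula concrete and to check one known moment (Var[X] = df/(df - 2) for df > 2); the function name `sample_t_polar` is made up for this illustration.

import numpy as np

def sample_t_polar(df, n, seed=0):
  # G, H iid uniform on [0, 1]; theta = 2*pi*H; R = sqrt(df*(G^(-2/df) - 1));
  # X = R*cos(theta) follows a Student-t distribution with `df` degrees of freedom.
  rng = np.random.default_rng(seed)
  g = rng.uniform(size=n)
  h = rng.uniform(size=n)
  theta = 2.0 * np.pi * h
  r = np.sqrt(df * (g ** (-2.0 / df) - 1.0))
  return r * np.cos(theta)

samples = sample_t_polar(df=5.0, n=200000)
print(samples.var(), 5.0 / (5.0 - 2.0))  # both should be close to ~1.67
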
def _reverse_seq(input_seq, lengths):
  """Reverse a list of Tensors up to specified lengths.

  Args:
    input_seq: Sequence of seq_len tensors of dimension (batch_size, depth)
    lengths: A tensor of dimension batch_size, containing lengths for each
      sequence in the batch. If "None" is specified, simply reverses the list.

  Returns:
    time-reversed sequence
  """
  if lengths is None:
    return list(reversed(input_seq))

  input_shape = tensor_shape.matrix(None, None)
  for input_ in input_seq:
    input_shape.merge_with(input_.get_shape())
    input_.set_shape(input_shape)

  # Join into (time, batch_size, depth)
  s_joined = array_ops.pack(input_seq)

  # TODO(schuster, ebrevdo): Remove cast when reverse_sequence takes int32
  if lengths is not None:
    lengths = math_ops.to_int64(lengths)

  # Reverse along dimension 0
  s_reversed = array_ops.reverse_sequence(s_joined, lengths, 0, 1)
  # Split again into list
  result = array_ops.unpack(s_reversed)
  for r in result:
    r.set_shape(input_shape)
  return result

def seq2seq_inputs(x, y, input_length, output_length, sentinel=None, name=None):
  """Processes inputs for Sequence to Sequence models.

  Args:
    x: Input Tensor [batch_size, input_length, embed_dim].
    y: Output Tensor [batch_size, output_length, embed_dim].
    input_length: length of input x.
    output_length: length of output y.
    sentinel: optional first input to decoder and final output expected.
      If sentinel is not provided, zeros are used. Because y is not available
      at sampling time, the shape of the sentinel will be inferred from x.
    name: Operation name.

  Returns:
    Encoder input from x, and decoder inputs and outputs from y.
  """
  with ops.name_scope(name, "seq2seq_inputs", [x, y]):
    in_x = array_ops_.unpack(x, axis=1)
    y = array_ops_.unpack(y, axis=1)
    if not sentinel:
      # Set to zeros of shape of y[0], using x for batch size.
      sentinel_shape = array_ops_.pack(
          [array_ops_.shape(x)[0], y[0].get_shape()[1]])
      sentinel = array_ops_.zeros(sentinel_shape)
      sentinel.set_shape(y[0].get_shape())
    in_y = [sentinel] + y
    out_y = y + [sentinel]
    return in_x, in_y, out_y

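As a toy illustration of the shifting done by `seq2seq_inputs` (plain Python strings standing in for the per-timestep tensors produced by unpack): the decoder is fed the sentinel followed by the unpacked y, and is expected to emit y followed by the sentinel.

y = ["y0", "y1", "y2"]           # decoder targets, one entry per timestep
sentinel = "<GO/EOS>"            # hypothetical placeholder value
in_y = [sentinel] + y            # decoder inputs:  <GO/EOS>, y0, y1, y2
out_y = y + [sentinel]           # decoder outputs: y0, y1, y2, <GO/EOS>
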
def _reverse_seq(input_seq, lengths):
  """Reverse a list of Tensors up to specified lengths.

  Args:
    input_seq: Sequence of seq_len tensors of dimension (batch_size, depth)
    lengths: A tensor of dimension batch_size, containing lengths for each
      sequence in the batch. If "None" is specified, simply reverses the list.

  Returns:
    time-reversed sequence
  """
  if lengths is None:
    return list(reversed(input_seq))

  for input_ in input_seq:
    input_.set_shape(input_.get_shape().with_rank(2))

  # Join into (time, batch_size, depth)
  s_joined = array_ops_.pack(input_seq)
  # Reverse along dimension 0
  s_reversed = array_ops_.reverse_sequence(s_joined, lengths, 0, 1)
  # Split again into list
  result = array_ops_.unpack(s_reversed)
  return result

def _cat_probs(self, log_probs):
  """Get a list of num_components batchwise probabilities."""
  which_softmax = nn_ops.log_softmax if log_probs else nn_ops.softmax
  cat_probs = which_softmax(self.cat.logits)
  cat_probs = array_ops.unpack(cat_probs, num=self.num_components, axis=-1)
  return cat_probs

def __call__(self, inputs, initial_state=None, dtype=None,
             sequence_length=None, scope=None):
  is_list = isinstance(inputs, list)
  if self._use_dynamic_rnn:
    if is_list:
      inputs = array_ops.pack(inputs)
    outputs, state = rnn.dynamic_rnn(
        self._cell,
        inputs,
        sequence_length=sequence_length,
        initial_state=initial_state,
        dtype=dtype,
        time_major=True,
        scope=scope)
    if is_list:
      # Convert outputs back to list
      outputs = array_ops.unpack(outputs)
  else:  # non-dynamic rnn
    if not is_list:
      inputs = array_ops.unpack(inputs)
    outputs, state = rnn.rnn(self._cell,
                             inputs,
                             initial_state=initial_state,
                             dtype=dtype,
                             sequence_length=sequence_length,
                             scope=scope)
    if not is_list:
      # Convert outputs back to tensor
      outputs = array_ops.pack(outputs)
  return outputs, state

def __call__(self, inputs, state, scope=None):
  """Run this multi-layer cell on inputs, starting from state."""
  with vs.variable_scope(scope or type(self).__name__):  # "MultiRNNCell"
    cur_state_pos = 0
    cur_inp = inputs
    new_states = []
    for i, cell in enumerate(self._cells):
      with vs.variable_scope("Cell%d" % i):
        if self._state_is_tuple:
          if not nest.is_sequence(state):
            raise ValueError(
                "Expected state to be a tuple of length %d, but received: %s"
                % (len(self.state_size), state))
          cur_state = state[i]
        else:
          # print("STATE", state)
          # cur_state = array_ops.slice(
          #     state, [0, cur_state_pos], [-1, cell.state_size])
          cur_state = array_ops.unpack(state)[i]
          # cur_state_pos += cell.state_size
        cur_inp, new_state = cell(cur_inp, cur_state)
        new_states.append(new_state)
    # new_states = (tuple(new_states) if self._state_is_tuple
    #               else array_ops.concat(1, new_states))
    new_states = array_ops.pack(new_states)
  return cur_inp, new_states

def testBatch(self):
  try:
    # Build an arbitrary RGB image
    np.random.seed(7)
    batch_size = 5
    shape = (batch_size, 2, 7, 3)
    inp = np.random.rand(*shape).astype(np.float32)

    # Convert to HSV and back, as a batch and individually
    with self.test_session() as sess:
      batch0 = constant_op.constant(inp)
      batch1 = image_ops.rgb_to_hsv(batch0)
      batch2 = image_ops.hsv_to_rgb(batch1)
      split0 = array_ops.unpack(batch0)
      split1 = list(map(image_ops.rgb_to_hsv, split0))
      split2 = list(map(image_ops.hsv_to_rgb, split1))
      join1 = array_ops.pack(split1)
      join2 = array_ops.pack(split2)
      batch1, batch2, join1, join2 = sess.run([batch1, batch2, join1, join2])

    # Verify that processing batch elements together is the same as separate
    self.assertAllClose(batch1, join1)
    self.assertAllClose(batch2, join2)
    self.assertAllClose(batch2, inp)
  except:
    import pdb
    pdb.post_mortem()

def call_rnn_uni_dynamic(cell_encoder, embeddings, sequence_length, dtype):
  # pack for time_major=False
  embeddings = array_ops.pack(embeddings, axis=1)
  encoder_outputs, encoder_state = rnn.dynamic_rnn(cell_encoder, embeddings,
                                                   sequence_length, dtype=dtype)
  encoder_outputs = array_ops.unpack(encoder_outputs, axis=1)
  return encoder_outputs, encoder_state

def call_rnn_uni_dynamic(cell_encoder, embeddings, sequence_length, dtype):
  # print(embeddings[0].get_shape())
  # pack for time_major=False
  embeddings = array_ops.pack(embeddings, axis=1)
  encoder_outputs, encoder_state = rnn.dynamic_rnn(cell_encoder, embeddings,
                                                   sequence_length, dtype=dtype)
  encoder_outputs = array_ops.unpack(encoder_outputs, axis=1)
  return encoder_outputs, encoder_state

def call_rnn_bidir_static(cell_encoder_fw, cell_encoder_bw, embeddings, dtype):
  encoder_outputs, encoder_state_fw, encoder_state_bw = rnn.bidirectional_rnn(
      cell_encoder_fw, cell_encoder_bw, embeddings, dtype=dtype)
  encoder_state = array_ops.concat(1, [encoder_state_fw, encoder_state_bw])
  # encoder_outputs = array_ops.unpack(2, encoder_outputs)
  return encoder_outputs, encoder_state

def call_rnn_bidir_dynamic(cell_encoder_fw, cell_encoder_bw, embeddings,
                           sequence_length, dtype):
  embeddings = array_ops.pack(embeddings, axis=1)
  encoder_outputs, encoder_state = rnn.bidirectional_dynamic_rnn(
      cell_encoder_fw, cell_encoder_bw, embeddings, sequence_length,
      dtype=dtype)
  encoder_outputs = array_ops.concat(2, encoder_outputs)
  encoder_state = array_ops.concat(1, encoder_state)
  encoder_outputs = array_ops.unpack(encoder_outputs, axis=1)
  return encoder_outputs, encoder_state

def _reverse_seq(input_seq, lengths):
  """Reverse a list of Tensors up to specified lengths.

  Args:
    input_seq: Sequence of seq_len tensors of dimension (batch_size, n_features)
      or nested tuples of tensors.
    lengths: A `Tensor` of dimension batch_size, containing lengths for each
      sequence in the batch. If "None" is specified, simply reverses the list.

  Returns:
    time-reversed sequence
  """
  if lengths is None:
    return list(reversed(input_seq))

  flat_input_seq = tuple(nest.flatten(input_) for input_ in input_seq)

  flat_results = [[] for _ in range(len(input_seq))]
  for sequence in zip(*flat_input_seq):
    input_shape = tensor_shape.unknown_shape(
        ndims=sequence[0].get_shape().ndims)
    for input_ in sequence:
      input_shape.merge_with(input_.get_shape())
      input_.set_shape(input_shape)

    # Join into (time, batch_size, depth)
    s_joined = array_ops.pack(sequence)

    # TODO(schuster, ebrevdo): Remove cast when reverse_sequence takes int32
    if lengths is not None:
      lengths = math_ops.to_int64(lengths)

    # Reverse along dimension 0
    s_reversed = array_ops.reverse_sequence(s_joined, lengths, 0, 1)
    # Split again into list
    result = array_ops.unpack(s_reversed)
    for r, flat_result in zip(result, flat_results):
      r.set_shape(input_shape)
      flat_result.append(r)

  results = [nest.pack_sequence_as(structure=input_, flat_sequence=flat_result)
             for input_, flat_result in zip(input_seq, flat_results)]
  return results

def dense_to_sparse_tensor(dense_tensor, ignore_value=None):
  """Converts a dense Tensor to a SparseTensor, dropping ignore_value cells.

  Args:
    dense_tensor: A `Tensor`.
    ignore_value: Entries in `dense_tensor` equal to this value will be
      absent from the returned `SparseTensor`. If `None`, default value of
      dense_tensor's dtype will be used (e.g. '' for `str`, 0 for `int`).

  Returns:
    A `SparseTensor` with the same shape as `dense_tensor`.

  Raises:
    ValueError: when `dense_tensor`'s rank is `None`.
  """
  with ops.name_scope("DenseToSparseTensor"):
    dense_t = ops.convert_to_tensor(dense_tensor)
    if dense_t.get_shape().ndims is None:
      # TODO(b/32318825): Implement dense_to_sparse_tensor for undefined rank.
      raise ValueError("dense_tensor.get_shape() should be defined, got None.")
    if ignore_value is None:
      if dense_t.dtype == dtypes.string:
        # Exception due to TF strings are converted to numpy objects by default.
        ignore_value = ""
      else:
        ignore_value = dense_t.dtype.as_numpy_dtype()
    dense_shape = math_ops.cast(array_ops.shape(dense_t), dtypes.int64)
    indices = array_ops.where(
        math_ops.not_equal(dense_t, math_ops.cast(ignore_value, dense_t.dtype)))
    index_dims = len(dense_t.get_shape())
    # Flattens the tensor and indices for use with gather.
    flat_tensor = array_ops.reshape(dense_t, [-1])
    flat_indices = indices[:, index_dims - 1]
    # Computes the correct flattened indices for 2d (or higher) tensors.
    if index_dims > 1:
      higher_dims = indices[:, :index_dims - 1]
      shape_multipliers = array_ops.pack(
          _multiplier_helper(array_ops.unpack(dense_shape)[1:]))
      offsets = math_ops.reduce_sum(
          math_ops.mul(higher_dims, shape_multipliers), reduction_indices=[1])
      flat_indices = math_ops.add(flat_indices, offsets)
    values = array_ops.gather(flat_tensor, flat_indices)
    return sparse_tensor.SparseTensor(indices, values, dense_shape)

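The offset computation above maps each multi-dimensional index to its row-major position in the flattened tensor (`_multiplier_helper` is not shown in this example; it is assumed to produce the cumulative products of the trailing dimensions). A small NumPy check of that arithmetic, with hard-coded multipliers for a [2, 3, 4] shape:

import numpy as np

# For dense shape [d0, d1, d2], the row-major multipliers are [d1*d2, d2, 1],
# so index [i, j, k] maps to i*d1*d2 + j*d2 + k in the flattened tensor.
dense_shape = np.array([2, 3, 4])
multipliers = np.array([12, 4, 1])          # products of trailing dimensions
index = np.array([1, 2, 3])
flat = int((index * multipliers).sum())     # 1*12 + 2*4 + 3*1 = 23
dense = np.arange(24).reshape(2, 3, 4)
assert dense[1, 2, 3] == dense.reshape(-1)[flat]
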