The following 50 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.python.framework.dtypes.int32().
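Before the project examples, a minimal sketch of the two patterns that recur throughout them (assuming TensorFlow 1.x; the tensor values are illustrative):

from tensorflow.python.framework import dtypes, ops
from tensorflow.python.ops import math_ops

# Pattern 1: pass dtypes.int32 as an explicit dtype.
ids = ops.convert_to_tensor([3, 1, 4], dtype=dtypes.int32, name="ids")

# Pattern 2: cast an int64 result down to int32. argmax returns int64,
# while many downstream ops (embedding lookups, TensorArray indices)
# expect int32.
logits = ops.convert_to_tensor([[0.1, 2.0, 0.3]])
best = math_ops.cast(math_ops.argmax(logits, axis=-1), dtypes.int32)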
def initial_alignments(self, batch_size, dtype):
  """Creates the initial alignment values for the `AttentionWrapper` class.

  This is important for AttentionMechanisms that use the previous alignment
  to calculate the alignment at the next time step (e.g. monotonic attention).

  The default behavior is to return a tensor of all zeros.

  Args:
    batch_size: `int32` scalar, the batch_size.
    dtype: The `dtype`.

  Returns:
    A `dtype` tensor shaped `[batch_size, alignments_size]`
    (`alignments_size` is the values' `max_time`).
  """
  max_time = self._alignments_size
  return _zero_state_tensors(max_time, batch_size, dtype)
def conv_decoder_train(self, enc_output, labels, sequence_length):
  embed_size = labels.get_shape().as_list()[-1]
  if self.params["position_embeddings.enable"]:
    positions_embed = self._create_position_embedding(
        lengths=sequence_length,
        maxlen=tf.shape(labels)[1])
    labels = self._combiner_fn(labels, positions_embed)

  # Apply dropout to embeddings
  inputs = tf.contrib.layers.dropout(
      inputs=labels,
      keep_prob=self.params["embedding_dropout_keep_prob"],
      is_training=self.mode == tf.contrib.learn.ModeKeys.TRAIN)

  next_layer = self.conv_block(enc_output, inputs, True)

  logits = _transpose_batch_time(next_layer)
  sample_ids = tf.cast(tf.argmax(logits, axis=-1), tf.int32)

  return ConvDecoderOutput(logits=logits, predicted_ids=sample_ids)
def testShapeWithUnknownConcatDim(self):
  p1 = array_ops.placeholder(dtypes.complex64)
  c1 = constant_op.constant(np.complex64(10.0 + 0j), shape=[4, 4, 4, 4])
  p2 = array_ops.placeholder(dtypes.complex64)
  c2 = constant_op.constant(np.complex64(20.0 + 0j), shape=[4, 4, 4, 4])
  dim = array_ops.placeholder(dtypes.int32)
  concat = array_ops.concat([p1, c1, p2, c2], dim)
  self.assertEqual(4, concat.get_shape().ndims)

  # All dimensions unknown.
  concat2 = array_ops.concat([p1, p2], dim)
  self.assertEqual(None, concat2.get_shape())

  # Rank doesn't match.
  c3 = constant_op.constant(np.complex64(30.0 + 0j), shape=[4, 4, 4])
  with self.assertRaises(ValueError):
    array_ops.concat([p1, c1, p2, c3], dim)
def in_top_k(predictions, targets, k):
  """Returns whether the `targets` are in the top `k` `predictions`.

  Arguments:
    predictions: A tensor of shape `(batch_size, classes)` and type `float32`.
    targets: A 1D tensor of length `batch_size` and type `int32` or `int64`.
    k: An `int`, number of top elements to consider.

  Returns:
    A 1D tensor of length `batch_size` and type `bool`.
    `output[i]` is `True` if `predictions[i, targets[i]]` is within top-`k`
    values of `predictions[i]`.
  """
  return nn.in_top_k(predictions, targets, k)


# CONVOLUTIONS
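A usage sketch for the wrapped op (the prediction and target values here are made up for illustration; assumes a TF 1.x session):

import tensorflow as tf

predictions = tf.constant([[0.1, 0.7, 0.2],
                           [0.5, 0.3, 0.2]], dtype=tf.float32)
targets = tf.constant([1, 2], dtype=tf.int32)  # must be int32 or int64
hits = tf.nn.in_top_k(predictions, targets, k=2)

with tf.Session() as sess:
  print(sess.run(hits))  # [ True False]: class 2 is outside row 1's top-2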
def _tensor_shape_tensor_conversion_function(s, dtype=None, name=None,
                                             as_ref=False):
  _ = as_ref
  if not s.is_fully_defined():
    raise ValueError(
        "Cannot convert a partially known TensorShape to a Tensor: %s" % s)
  s_list = s.as_list()
  int64_value = 0
  for dim in s_list:
    if dim >= 2**31:
      int64_value = dim
      break

  if dtype is not None:
    if dtype not in (dtypes.int32, dtypes.int64):
      raise TypeError("Cannot convert a TensorShape to dtype: %s" % dtype)
    if dtype == dtypes.int32 and int64_value:
      raise ValueError("Cannot convert a TensorShape to dtype int32; "
                       "a dimension is too large (%s)" % int64_value)
  else:
    dtype = dtypes.int64 if int64_value else dtypes.int32

  if name is None:
    name = "shape_as_tensor"
  return constant(s_list, dtype=dtype, name=name)
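To see the int32/int64 selection above in action, a small sketch (assuming TF 1.x, which registers this function as the tensor converter for `TensorShape`):

from tensorflow.python.framework import ops, tensor_shape

small = ops.convert_to_tensor(tensor_shape.TensorShape([4, 8]))
print(small.dtype)  # int32: every dimension fits in 32 bits

big = ops.convert_to_tensor(tensor_shape.TensorShape([2**31, 2]))
print(big.dtype)    # int64: forced by the dimension >= 2**31

# Explicitly requesting dtype=int32 for `big` would raise the
# ValueError shown above.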
def assert_scalar_int(tensor):
  """Assert `tensor` is 0-D, of type `tf.int32` or `tf.int64`.

  Args:
    tensor: Tensor to test.

  Returns:
    `tensor`, for chaining.

  Raises:
    ValueError: if `tensor` is not 0-D, of type `tf.int32` or `tf.int64`.
  """
  data_type = tensor.dtype
  if data_type.base_dtype not in [dtypes.int32, dtypes.int64]:
    raise ValueError('Unexpected type %s for %s.' % (data_type, tensor.name))
  shape = tensor.get_shape()
  if shape.ndims != 0:
    raise ValueError('Unexpected shape %s for %s.' % (shape, tensor.name))
  return tensor
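Illustrative calls against the function above (hypothetical tensors, assuming TF 1.x):

import tensorflow as tf

good = tf.constant(3, dtype=tf.int32)      # 0-D int32: passes
checked = assert_scalar_int(good)          # returned unchanged, for chaining

# assert_scalar_int(tf.constant(3.0))      # ValueError: unexpected type
# assert_scalar_int(tf.constant([1, 2]))   # ValueError: unexpected shape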
def get_ndims(self, x, name="get_ndims"):
  """Get `Tensor` number of dimensions (rank).

  Args:
    x: `Tensor`.
    name: `String`. The name to give this op.

  Returns:
    ndims: Scalar number of dimensions associated with a `Tensor`.
  """
  with self._name_scope(name, values=[x]):
    x = ops.convert_to_tensor(x, name="x")
    ndims = x.get_shape().ndims
    if ndims is None:
      return array_ops.rank(x, name="ndims")
    return ops.convert_to_tensor(ndims, dtype=dtypes.int32, name="ndims")
def _assert_non_negative_int32_scalar(self, x):
  """Helper which ensures that input is a non-negative, int32, scalar."""
  x = ops.convert_to_tensor(x, name="x")
  if x.dtype.base_dtype != dtypes.int32.base_dtype:
    raise TypeError("%s.dtype=%s is not %s" % (x.name, x.dtype, dtypes.int32))
  x_value_static = tensor_util.constant_value(x)
  if x.get_shape().ndims is not None and x_value_static is not None:
    if x.get_shape().ndims != 0:
      raise ValueError("%s.ndims=%d is not 0 (scalar)" %
                       (x.name, x.get_shape().ndims))
    if x_value_static < 0:
      raise ValueError("%s.value=%d cannot be negative" %
                       (x.name, x_value_static))
    # The early return belongs inside this statically-checked branch;
    # otherwise the validate_args path below would be unreachable.
    return x
  if self.validate_args:
    x = control_flow_ops.with_dependencies([
        check_ops.assert_rank(x, 0),
        check_ops.assert_non_negative(x)], x)
  return x
def set_size(a, validate_indices=True):
  """Compute number of unique elements along last dimension of `a`.

  Args:
    a: `SparseTensor`, with indices sorted in row-major order.
    validate_indices: Whether to validate the order and range of sparse
      indices in `a`.

  Returns:
    `int32` `Tensor` of set sizes. For `a` ranked `n`, this is a `Tensor`
    with rank `n-1`, and the same 1st `n-1` dimensions as `a`. Each value is
    the number of unique elements in the corresponding `[0...n-1]` dimension
    of `a`.

  Raises:
    TypeError: If `a` is an invalid type.
  """
  a = tensor_util.convert_to_tensor_or_sparse_tensor(a, name="a")
  if not isinstance(a, sparse_tensor.SparseTensor):
    raise TypeError("Expected `SparseTensor`, got %s." % a)
  if a.values.dtype.base_dtype not in _VALID_DTYPES:
    raise TypeError("Invalid dtype %s." % a.values.dtype)
  # pylint: disable=protected-access
  return gen_set_ops.set_size(a.indices, a.values, a.shape, validate_indices)
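A usage sketch through the public wrapper (assuming `tf.sets.set_size` in TF 1.x; the sparse values are made up):

import tensorflow as tf

# A 2x3 sparse matrix: row 0 holds {7, 3}, row 1 holds {3}.
sp = tf.SparseTensor(indices=[[0, 0], [0, 1], [1, 0]],
                     values=tf.constant([7, 3, 3], dtype=tf.int64),
                     dense_shape=[2, 3])
sizes = tf.sets.set_size(sp)  # int32 Tensor of shape [2] -> [2, 1]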
def __init__(self,
             p=None,
             dtype=dtypes.int32,
             validate_args=False,
             allow_nan_stats=True,
             name="BernoulliWithSigmoidP"):
  parameters = locals()
  parameters.pop("self")
  with ops.name_scope(name) as ns:
    super(BernoulliWithSigmoidP, self).__init__(
        p=nn.sigmoid(p),
        dtype=dtype,
        validate_args=validate_args,
        allow_nan_stats=allow_nan_stats,
        name=ns)
  self._parameters = parameters
def forward_event_shape(self, input_shape, name="forward_event_shape"):
  """Shape of a single sample from a single batch as an `int32` 1D `Tensor`.

  Args:
    input_shape: `Tensor`, `int32` vector indicating event-portion shape
      passed into `forward` function.
    name: name to give to the op

  Returns:
    forward_event_shape: `Tensor`, `int32` vector indicating event-portion
      shape after applying `forward`.
  """
  with self._name_scope(name, [input_shape]):
    input_shape = ops.convert_to_tensor(input_shape, dtype=dtypes.int32,
                                        name="input_shape")
    return self._forward_event_shape(input_shape)
def __init__(self, event_ndims=0, validate_args=False, name="exp"):
  """Instantiates the `Exp` bijector.

  Args:
    event_ndims: Scalar `int32` `Tensor` indicating the number of dimensions
      associated with a particular draw from the distribution.
    validate_args: `Boolean` indicating whether arguments should be checked
      for correctness.
    name: `String` name given to ops managed by this object.
  """
  super(Exp, self).__init__(
      batch_ndims=0,
      event_ndims=event_ndims,
      validate_args=validate_args,
      name=name)
def _voc_seg_load_file(path, epochs=None, shuffle=True, seed=0):
  PASCAL_ROOT = os.environ['VOC_DIR']
  filename_queue = tf.train.string_input_producer(
      [path], num_epochs=epochs, shuffle=shuffle, seed=seed)
  reader = tf.TextLineReader()
  key, value = reader.read(filename_queue)
  image_path, seg_path = tf.decode_csv(value, record_defaults=[[''], ['']],
                                       field_delim=' ')
  image_abspath = PASCAL_ROOT + image_path
  seg_abspath = PASCAL_ROOT + seg_path

  image_content = tf.read_file(image_abspath)
  image = decode_image(image_content, channels=3)
  image.set_shape([None, None, 3])
  imgshape = tf.shape(image)[:2]
  imgname = image_path

  seg_content = tf.read_file(seg_abspath)
  seg = tf.cast(tf.image.decode_png(seg_content, channels=1), tf.int32)
  return image, seg, imgshape, imgname
def _imagenet_load_file(path, epochs=None, shuffle=True, seed=0,
                        subset='train', prepare_path=True):
  IMAGENET_ROOT = os.environ.get('IMAGENET_DIR', '')
  if not isinstance(path, list):
    path = [path]
  filename_queue = tf.train.string_input_producer(
      path, num_epochs=epochs, shuffle=shuffle, seed=seed)
  reader = tf.TextLineReader()
  key, value = reader.read(filename_queue)
  image_path, label_str = tf.decode_csv(value, record_defaults=[[''], ['']],
                                        field_delim=' ')
  if prepare_path:
    image_abspath = IMAGENET_ROOT + '/images/' + subset + image_path
  else:
    image_abspath = image_path

  image_content = tf.read_file(image_abspath)
  image = decode_image(image_content, channels=3)
  image.set_shape([None, None, 3])
  imgshape = tf.shape(image)[:2]
  label = tf.string_to_number(label_str, out_type=tf.int32)

  return image, label, imgshape, image_path
def mnist_batching(batch_size, subset='training', input_size=None,
                   num_threads=1):
  xraw, y = _load_mnist(subset, x_dtype=np.float32, y_dtype=np.int32)
  if input_size is None or input_size == xraw.shape[1]:
    x = xraw
  else:
    # Zero-pad the images up to input_size, centering the original content.
    x = np.zeros((xraw.shape[0], input_size, input_size, 1), dtype=np.float32)
    w = (input_size - xraw.shape[1]) // 2
    x[:, w:w + xraw.shape[1], w:w + xraw.shape[2]] = xraw[..., np.newaxis]

  min_after_dequeue = 10
  capacity = min_after_dequeue * 3 + batch_size
  x1, y1 = tf.train.slice_input_producer([x, y], shuffle=True)
  batch_x, batch_y = tf.train.shuffle_batch(
      [x1, y1],
      batch_size=batch_size,
      capacity=capacity,
      min_after_dequeue=min_after_dequeue,
      num_threads=num_threads)
  return batch_x, batch_y
def temp_testCropping(self):
  x_shape = [4, 8, 1]
  x_np = np.array([[1, 2, 3, 4, 5, 6, 7, 8],
                   [1, 2, 3, 4, 5, 6, 7, 8],
                   [1, 2, 3, 4, 5, 6, 7, 8],
                   [1, 2, 3, 4, 5, 6, 7, 8]],
                  dtype=np.int32).reshape(x_shape)
  y_np = np.array([[3, 4, 5, 6],
                   [3, 4, 5, 6]]).reshape([2, 4, 1])
  with self.test_session():
    x = constant_op.constant(x_np, shape=x_shape)
    try:
      y = image_ops.central_crop(x, 0.5)
    except:
      import pdb
      pdb.post_mortem()
    y_tf = y.eval()
    self.assertAllEqual(y_tf, y_np)
def testSampleDistortedBoundingBoxShape(self):
  with self.test_session():
    image_size = constant_op.constant([40, 50, 1], shape=[3],
                                      dtype=dtypes.int32)
    bounding_box = constant_op.constant([[[0.0, 0.0, 1.0, 1.0]]],
                                        shape=[1, 1, 4],
                                        dtype=dtypes.float32)
    begin, end, bbox_for_drawing = image_ops.sample_distorted_bounding_box(
        image_size=image_size,
        bounding_boxes=bounding_box,
        min_object_covered=0.1,
        aspect_ratio_range=(0.75, 1.33),
        area_range=(0.05, 1.0))

    # Test that the shapes are correct.
    self.assertAllEqual([3], begin.get_shape().as_list())
    self.assertAllEqual([3], end.get_shape().as_list())
    self.assertAllEqual([1, 1, 4], bbox_for_drawing.get_shape().as_list())
def testCropping(self):
  x_np = np.arange(0, 30, dtype=np.int32).reshape([6, 5, 1])
  offset_height = 1
  after_height = 2
  offset_width = 0
  after_width = 3
  target_height = x_np.shape[0] - offset_height - after_height
  target_width = x_np.shape[1] - offset_width - after_width
  y_np = x_np[offset_height:offset_height + target_height,
              offset_width:offset_width + target_width, :]
  with self.test_session():
    x = constant_op.constant(x_np, shape=x_np.shape)
    y = image_ops.crop_to_bounding_box(x, offset_height, offset_width,
                                       target_height, target_width)
    y_tf = y.eval()
    self.assertAllEqual(y_tf.flatten(), y_np.flatten())
def step(self, time, inputs, state, name=None):
  """Called per step of decoding (but only once for dynamic decoding).

  Args:
    time: Scalar `int32` tensor.
    inputs: Input (possibly nested tuple of) tensor[s] for this time step.
    state: State (possibly nested tuple of) tensor[s] from previous time step.
    name: Name scope for any created operations.

  Returns:
    `(outputs, next_state, next_inputs, finished)`.
  """
  raise NotImplementedError
def _create_zero_outputs(size, dtype, batch_size):
  """Create a zero outputs Tensor structure."""
  def _t(s):
    return (s if isinstance(s, ops.Tensor) else constant_op.constant(
        tensor_shape.TensorShape(s).as_list(),
        dtype=dtypes.int32,
        name="zero_suffix_shape"))

  def _create(s, d):
    return array_ops.zeros(
        array_ops.concat(
            ([batch_size], _t(s)), axis=0), dtype=d)

  return nest.map_structure(_create, size, dtype)
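An illustrative call, under the TF 1.x internal imports used above (the size and batch here are made up):

from tensorflow.python.framework import dtypes, tensor_shape

# With size=TensorShape([5]), dtype=float32 and batch_size=8, the helper
# builds an [8, 5] float32 tensor of zeros; nested structures of sizes and
# dtypes are handled by nest.map_structure.
zeros = _create_zero_outputs(tensor_shape.TensorShape([5]),
                             dtypes.float32, 8)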
def batch_size(self):
  """Returns a scalar int32 tensor."""
  raise NotImplementedError("batch_size has not been implemented")
def __init__(self, inputs, sequence_length, time_major=False, name=None):
  """Initializer.

  Args:
    inputs: A (structure of) input tensors.
    sequence_length: An int32 vector tensor.
    time_major: Python bool. Whether the tensors in `inputs` are time major.
      If `False` (default), they are assumed to be batch major.
    name: Name scope for any created operations.

  Raises:
    ValueError: if `sequence_length` is not a 1D tensor.
  """
  with ops.name_scope(name, "TrainingHelper", [inputs, sequence_length]):
    inputs = ops.convert_to_tensor(inputs, name="inputs")
    if not time_major:
      inputs = nest.map_structure(_transpose_batch_time, inputs)
    self._input_tas = nest.map_structure(_unstack_ta, inputs)
    self._sequence_length = ops.convert_to_tensor(
        sequence_length, name="sequence_length")
    if self._sequence_length.get_shape().ndims != 1:
      raise ValueError(
          "Expected sequence_length to be a vector, but received shape: %s" %
          self._sequence_length.get_shape())
    self._zero_inputs = nest.map_structure(
        lambda inp: array_ops.zeros_like(inp[0, :]), inputs)
    self._batch_size = array_ops.size(sequence_length)
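A construction sketch (hypothetical batch of embedded inputs, assuming tf.contrib.seq2seq in TF 1.x):

import tensorflow as tf

inputs = tf.zeros([32, 20, 64])   # [batch, time, embed], batch major
lengths = tf.fill([32], 20)       # int32 vector shaped [batch_size]
helper = tf.contrib.seq2seq.TrainingHelper(inputs, lengths)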
def sample(self, time, outputs, name=None, **unused_kwargs):
  with ops.name_scope(name, "TrainingHelperSample", [time, outputs]):
    sample_ids = math_ops.cast(
        math_ops.argmax(outputs, axis=-1), dtypes.int32)
    return sample_ids
def next_inputs(self, time, outputs, state, sample_ids, name=None):
  with ops.name_scope(name, "ScheduledEmbeddingTrainingHelperSample",
                      [time, outputs, state, sample_ids]):
    (finished, base_next_inputs, state) = (
        super(ScheduledEmbeddingTrainingHelper, self).next_inputs(
            time=time,
            outputs=outputs,
            state=state,
            sample_ids=sample_ids,
            name=name))

    def maybe_sample():
      """Perform scheduled sampling."""
      where_sampling = math_ops.cast(
          array_ops.where(sample_ids > -1), dtypes.int32)
      where_not_sampling = math_ops.cast(
          array_ops.where(sample_ids <= -1), dtypes.int32)
      where_sampling_flat = array_ops.reshape(where_sampling, [-1])
      where_not_sampling_flat = array_ops.reshape(where_not_sampling, [-1])
      sample_ids_sampling = array_ops.gather(sample_ids, where_sampling_flat)
      inputs_not_sampling = array_ops.gather(base_next_inputs,
                                             where_not_sampling_flat)
      sampled_next_inputs = self._embedding_fn(sample_ids_sampling)
      base_shape = array_ops.shape(base_next_inputs)
      return (array_ops.scatter_nd(indices=where_sampling,
                                   updates=sampled_next_inputs,
                                   shape=base_shape)
              + array_ops.scatter_nd(indices=where_not_sampling,
                                     updates=inputs_not_sampling,
                                     shape=base_shape))

    all_finished = math_ops.reduce_all(finished)
    next_inputs = control_flow_ops.cond(
        all_finished, lambda: base_next_inputs, maybe_sample)
    return (finished, next_inputs, state)
def __init__(self, embedding, start_tokens, end_token):
  """Initializer.

  Args:
    embedding: A callable that takes a vector tensor of `ids` (argmax ids),
      or the `params` argument for `embedding_lookup`.
    start_tokens: `int32` vector shaped `[batch_size]`, the start tokens.
    end_token: `int32` scalar, the token that marks end of decoding.

  Raises:
    ValueError: if `start_tokens` is not a 1D tensor or `end_token` is not
      a scalar.
  """
  if callable(embedding):
    self._embedding_fn = embedding
  else:
    self._embedding_fn = (
        lambda ids: embedding_ops.embedding_lookup(embedding, ids))

  self._start_tokens = ops.convert_to_tensor(
      start_tokens, dtype=dtypes.int32, name="start_tokens")
  self._end_token = ops.convert_to_tensor(
      end_token, dtype=dtypes.int32, name="end_token")
  if self._start_tokens.get_shape().ndims != 1:
    raise ValueError("start_tokens must be a vector")
  self._batch_size = array_ops.size(start_tokens)
  if self._end_token.get_shape().ndims != 0:
    raise ValueError("end_token must be a scalar")
  self._start_inputs = self._embedding_fn(self._start_tokens)
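A minimal construction sketch (assumes `tf.contrib.seq2seq.GreedyEmbeddingHelper` from TF 1.x; vocabulary size, embedding width, and token ids are made up):

import tensorflow as tf

embedding = tf.get_variable("embedding", [1000, 64])  # vocab 1000, dim 64
start_tokens = tf.fill([32], 1)  # int32 vector shaped [batch_size]
end_token = 2                    # int32 scalar

helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(
    embedding, start_tokens, end_token)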
def sample(self, time, outputs, state, name=None):
  """sample for GreedyEmbeddingHelper."""
  del time, state  # unused by sample_fn
  # Outputs are logits, use argmax to get the most probable id
  if not isinstance(outputs, ops.Tensor):
    raise TypeError("Expected outputs to be a single Tensor, got: %s" %
                    type(outputs))
  sample_ids = math_ops.cast(
      math_ops.argmax(outputs, axis=-1), dtypes.int32)
  return sample_ids
def tf_ops(self, capacity=32):
  im = tf.placeholder(tf.float32, shape=self.image_shape)
  label = tf.placeholder(tf.int32, shape=self.label_shape)
  if self.image_shape is None or self.label_shape is None:
    shapes = None
  else:
    shapes = [self.image_shape, self.label_shape]
  queue = tf.FIFOQueue(capacity, [tf.float32, tf.int32], shapes=shapes)
  enqueue_op = queue.enqueue([im, label])
  fqr = FeedingQueueRunner(queue, [enqueue_op],
                           feed_fns=[self.feed(im, label).__next__])
  tf.train.add_queue_runner(fqr)
  return queue.dequeue()
def tf_ops(self, capacity=32):
  images = ops.convert_to_tensor(self._image_fn_list, dtype=dtypes.string)
  labels = ops.convert_to_tensor(self._label_list, dtype=dtypes.int32)

  # Makes an input queue
  im_fn_q, labl_q = tf.train.slice_input_producer(
      [images, labels], capacity=capacity, shuffle=True)

  file_contents_q = tf.read_file(im_fn_q)
  im_q = self._decoder(file_contents_q, channels=3)

  return im_q, labl_q
def state_saving_rnn(cell, inputs, state_saver, state_name,
                     sequence_length=None, scope=None):
  """RNN that accepts a state saver for time-truncated RNN calculation.

  Args:
    cell: An instance of RNNCell.
    inputs: A length T list of inputs, each a tensor of shape
      [batch_size, input_size].
    state_saver: A state saver object with methods `state` and `save_state`.
    state_name: The name to use with the state_saver.
    sequence_length: (optional) An int32/int64 vector size [batch_size].
      See the documentation for rnn() for more details about sequence_length.
    scope: VariableScope for the created subgraph; defaults to "RNN".

  Returns:
    A pair (outputs, state) where:
      outputs is a length T list of outputs (one for each input)
      state is the final state

  Raises:
    TypeError: If "cell" is not an instance of RNNCell.
    ValueError: If inputs is None or an empty list.
  """
  initial_state = state_saver.state(state_name)
  (outputs, state) = rnn(cell, inputs, initial_state=initial_state,
                         sequence_length=sequence_length, scope=scope)
  save_state = state_saver.save_state(state_name, state)
  with ops.control_dependencies([save_state]):
    outputs[-1] = array_ops.identity(outputs[-1])

  return (outputs, state)
def _reverse_seq(input_seq, lengths):
  """Reverse a list of Tensors up to specified lengths.

  Args:
    input_seq: Sequence of seq_len tensors of dimension (batch_size, depth)
    lengths: A tensor of dimension batch_size, containing lengths for each
      sequence in the batch. If "None" is specified, simply reverses the list.

  Returns:
    time-reversed sequence
  """
  if lengths is None:
    return list(reversed(input_seq))

  input_shape = tensor_shape.matrix(None, None)
  for input_ in input_seq:
    input_shape.merge_with(input_.get_shape())
    input_.set_shape(input_shape)

  # Join into (time, batch_size, depth)
  s_joined = array_ops.pack(input_seq)

  # TODO(schuster, ebrevdo): Remove cast when reverse_sequence takes int32
  if lengths is not None:
    lengths = math_ops.to_int64(lengths)

  # Reverse along dimension 0
  s_reversed = array_ops.reverse_sequence(s_joined, lengths, 0, 1)
  # Split again into list
  result = array_ops.unpack(s_reversed)
  for r in result:
    r.set_shape(input_shape)
  return result
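The central op above, shown directly with illustrative time-major values (the positional axis arguments match the call in the function):

import tensorflow as tf

# (time=3, batch=2, depth=1); batch entry 0 has length 2, entry 1 has length 3.
seq = tf.constant([[[1.], [10.]],
                   [[2.], [20.]],
                   [[3.], [30.]]])
lengths = tf.constant([2, 3], dtype=tf.int64)  # int64, per the cast above
rev = tf.reverse_sequence(seq, lengths, 0, 1)
# entry 0 along time: [1, 2, 3] -> [2, 1, 3]
# entry 1 along time: [10, 20, 30] -> [30, 20, 10]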
def _smallest_size_at_least(height, width, smallest_side):
  """Computes new shape with the smallest side equal to `smallest_side`.

  Computes new shape with the smallest side equal to `smallest_side` while
  preserving the original aspect ratio.

  Args:
    height: an int32 scalar tensor indicating the current height.
    width: an int32 scalar tensor indicating the current width.
    smallest_side: A python integer or scalar `Tensor` indicating the size of
      the smallest side after resize.

  Returns:
    new_height: an int32 scalar tensor indicating the new height.
    new_width: an int32 scalar tensor indicating the new width.
  """
  smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32)

  height = tf.to_float(height)
  width = tf.to_float(width)
  smallest_side = tf.to_float(smallest_side)

  scale = tf.cond(tf.greater(height, width),
                  lambda: smallest_side / width,
                  lambda: smallest_side / height)
  new_height = tf.to_int32(height * scale)
  new_width = tf.to_int32(width * scale)
  return new_height, new_width
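A worked example with illustrative dimensions: for height 480, width 640 and smallest_side 256, the scale is 256/480, so the new size is [256, 341]:

import tensorflow as tf

h = tf.constant(480, dtype=tf.int32)
w = tf.constant(640, dtype=tf.int32)
new_h, new_w = _smallest_size_at_least(h, w, 256)

with tf.Session() as sess:
  print(sess.run([new_h, new_w]))  # [256, 341]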
def _aspect_preserving_resize(image, smallest_side):
  """Resize images preserving the original aspect ratio.

  Args:
    image: A 3-D image or a 4-D batch of images `Tensor`.
    smallest_side: A python integer or scalar `Tensor` indicating the size of
      the smallest side after resize.

  Returns:
    resized_image: A 3-D or 4-D tensor containing the resized image(s).
  """
  smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32)

  input_rank = len(image.get_shape())
  if input_rank == 3:
    image = tf.expand_dims(image, 0)

  shape = tf.shape(image)
  height = shape[1]
  width = shape[2]
  new_height, new_width = _smallest_size_at_least(height, width,
                                                  smallest_side)
  resized_image = tf.image.resize_bilinear(image, [new_height, new_width],
                                           align_corners=False)
  if input_rank == 3:
    resized_image = tf.squeeze(resized_image)
    resized_image.set_shape([None, None, 3])
  else:
    resized_image.set_shape([None, None, None, 3])
  return resized_image
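And a hypothetical call for the resize wrapper (made-up input image):

import tensorflow as tf

image = tf.zeros([480, 640, 3], dtype=tf.float32)  # stand-in RGB input
resized = _aspect_preserving_resize(image, 256)
# Runtime shape [256, 341, 3]; the static shape is [None, None, 3] because
# the new size is computed inside the graph.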
def zero_state(self, batch_size, dtype):
  with ops.name_scope(type(self).__name__ + "ZeroState",
                      values=[batch_size]):
    if self._initial_cell_state is not None:
      cell_state = self._initial_cell_state
    else:
      cell_state = self._cell.zero_state(batch_size, dtype)
    error_message = (
        "When calling zero_state of AttentionWrapper %s: " % self._base_name +
        "Non-matching batch sizes between the memory "
        "(encoder output) and the requested batch size. Are you using "
        "the BeamSearchDecoder? If so, make sure your encoder output has "
        "been tiled to beam_width via tf.contrib.seq2seq.tile_batch, and "
        "the batch_size= argument passed to zero_state is "
        "batch_size * beam_width.")
    with ops.control_dependencies(
        [check_ops.assert_equal(batch_size,
                                self._attention_mechanism.batch_size,
                                message=error_message)]):
      cell_state = nest.map_structure(
          lambda s: array_ops.identity(s, name="checked_cell_state"),
          cell_state)
    if self._alignment_history:
      alignment_history = tensor_array_ops.TensorArray(
          dtype=dtype, size=0, dynamic_size=True)
    else:
      alignment_history = ()
    return AttentionWrapperState(
        cell_state=cell_state,
        time=array_ops.zeros([], dtype=dtypes.int32),
        attention=_zero_state_tensors(self._attention_size, batch_size,
                                      dtype),
        alignments=self._attention_mechanism.initial_alignments(
            batch_size, dtype),
        alignment_history=alignment_history)
def output_dtype(self):
  return ConvDecoderOutput(
      logits=tf.float32,
      predicted_ids=tf.int32)
def testConcatNoScalars(self):
  with self.test_session(force_gpu=True):
    scalar = constant_op.constant(np.complex64(7))
    dim = array_ops.placeholder(dtypes.int32)
    with self.assertRaisesRegexp(
        ValueError, r"Can't concatenate scalars \(use tf\.pack instead\)"):
      array_ops.concat([scalar, scalar, scalar], dim)

# important as gpu implementation could fail if
# shared memory is not large for all the inputs