The following code examples, extracted from open-source Python projects, illustrate how to use tensorflow.dtype().
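Before the examples, here is a minimal sketch (assuming TensorFlow 1.x) of the tf.DType surface most of the snippets below rely on: tf.as_dtype converts strings, NumPy dtypes, or tf.DType objects into tf.DType, whose properties (is_integer, is_floating, min, max) drive the domain checks further down.

import tensorflow as tf

# tf.as_dtype accepts strings, numpy dtypes, or tf.DType objects.
dt = tf.as_dtype('int64')
print(dt.is_integer)   # True
print(dt.min, dt.max)  # -9223372036854775808 9223372036854775807
print(tf.as_dtype('float32').is_floating)  # True
print(tf.string == tf.as_dtype('string'))  # True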
def __init__(self, dtype, min_value=None, max_value=None, is_categorical=None,
             vocabulary_file=''):
    super(IntDomain, self).__init__(dtype)
    if not self.dtype.is_integer:
        raise ValueError('IntDomain must be initialized with an integral dtype.')
    # NOTE: Because there are no unsigned 64-bit or 128-bit ints, the following
    # values are always in the int64 range, which is important for the proto
    # representation.
    self._min_value = min_value if min_value is not None else self.dtype.min
    self._max_value = max_value if max_value is not None else self.dtype.max
    # Parsing a non-existing value from JSON will return None; make sure it is
    # translated to False.
    self._is_categorical = (is_categorical
                            if is_categorical is not None
                            else False)
    self._vocabulary_file = vocabulary_file
def as_feature_spec(self, column):
    ind = self.index_fields
    if len(ind) != 1 or len(column.axes) != 1:
        raise ValueError('tf.Example parser supports only 1-d sparse features.')
    index = ind[0]
    if column.domain.dtype not in _TF_EXAMPLE_ALLOWED_TYPES:
        raise ValueError('tf.Example parser supports only types {}, so it is '
                         'invalid to generate a feature_spec with type '
                         '{}.'.format(
                             _TF_EXAMPLE_ALLOWED_TYPES,
                             repr(column.domain.dtype)))
    return tf.SparseFeature(index.name,
                            self._value_field_name,
                            column.domain.dtype,
                            column.axes[0].size,
                            index.is_sorted)
def get_bbox_10crop(crop_size, im_size):
    im_center = im_size[:2] / 2.0
    h_indices = (0, (im_size[0] - crop_size[0]) / 2.0)
    w_indices = (0, (im_size[1] - crop_size[1]) / 2.0)
    bboxs = np.empty((5, 5), dtype=np.int32)
    curr = 0
    for i in h_indices:
        for j in w_indices:
            bboxs[curr, :4] = (i, j, i + crop_size[0], j + crop_size[1])
            bboxs[curr, 4] = 1
            curr += 1
    bboxs[4, :4] = np.tile(im_center, (1, 2)) + \
        np.concatenate([-crop_size / 2.0, crop_size / 2.0])
    bboxs[4, 4] = 1
    bboxs = np.tile(bboxs, (2, 1))
    bboxs[5:, 4] = 0
    return bboxs
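A quick check of this helper (pure NumPy; the sizes are illustrative). It returns ten rows of (y1, x1, y2, x2, flag): four corner crops plus the center crop, then the same five repeated with the fifth column zeroed, presumably marking the mirrored copies for standard 10-crop evaluation.

import numpy as np

crop_size = np.array([224, 224])
im_size = np.array([256, 256])
bboxes = get_bbox_10crop(crop_size, im_size)
print(bboxes.shape)  # (10, 5)
print(bboxes[4])     # [ 16  16 240 240   1] -- the center crop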
def _bbox_to_mask(yy, region_size, dtype):
    # trim bounding box exceeding region_size on top and left
    neg_part = tf.nn.relu(-yy[:2])
    core = tf.ones(tf.to_int32(tf.round(yy[2:] - neg_part)), dtype=dtype)

    y1 = tf.maximum(yy[0], 0.)
    x1 = tf.maximum(yy[1], 0.)
    y2 = tf.minimum(region_size[0], yy[0] + yy[2])
    x2 = tf.minimum(region_size[1], yy[1] + yy[3])

    padding = (y1, region_size[0] - y2, x1, region_size[1] - x2)
    padding = tf.reshape(tf.stack(padding), (-1, 2))
    padding = tf.to_int32(tf.round(padding))
    mask = tf.pad(core, padding)

    # trim bounding box exceeding region_size on bottom and right
    rs = tf.to_int32(tf.round(region_size))
    mask = mask[:rs[0], :rs[1]]
    mask.set_shape((None, None))
    return mask
def bbox_to_mask(bbox, region_size, output_size, dtype=tf.float32):
    """Creates a binary mask of size `region_size`, where the rectangle given
    by `bbox` is filled with ones and the rest is zeros. Finally, the binary
    mask is resized to `output_size` with bilinear interpolation.

    :param bbox: tensor of shape (..., 4)
    :param region_size: tensor of shape (..., 2)
    :param output_size: 2-tuple of ints
    :param dtype: tf.dtype
    :return: a tensor of shape = (..., output_size)
    """
    shape = tf.concat(axis=0, values=(tf.shape(bbox)[:-1], output_size))
    bbox = tf.reshape(bbox, (-1, 4))
    region_size = tf.reshape(region_size, (-1, 2))

    def create_mask(args):
        yy, region_size = args
        return _bbox_to_mask_fixed_size(yy, region_size, output_size, dtype)

    mask = tf.map_fn(create_mask, (bbox, region_size), dtype=dtype)
    return tf.reshape(mask, shape)
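A minimal usage sketch (assuming TensorFlow 1.x and the companion helpers _bbox_to_mask / _bbox_to_mask_fixed_size shown elsewhere in this section; the box values are made up for illustration):

import tensorflow as tf

# one box in (y, x, height, width) form inside a 100x100 region
bbox = tf.constant([10., 20., 30., 40.])
region_size = tf.constant([100., 100.])
mask = bbox_to_mask(bbox, region_size, output_size=(64, 64))

with tf.Session() as sess:
    print(sess.run(mask).shape)  # (64, 64)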
def one_hot(labels, num_classes, name='one_hot'):
    """Transform numeric labels into onehot_labels.

    Args:
        labels: [batch_size] target labels.
        num_classes: total number of classes.
        name: Optional name for the op.
    Returns:
        one hot encoding of the labels.
    """
    with tf.op_scope([labels], name):
        batch_size = labels.get_shape()[0]
        indices = tf.expand_dims(tf.range(0, batch_size), 1)
        labels = tf.cast(tf.expand_dims(labels, 1), indices.dtype)
        concated = tf.concat(1, [indices, labels])
        onehot_labels = tf.sparse_to_dense(
            concated, tf.pack([batch_size, num_classes]), 1.0, 0.0)
        onehot_labels.set_shape([batch_size, num_classes])
        return onehot_labels
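This snippet predates the built-in tf.one_hot op (note the pre-1.0 tf.op_scope / tf.pack / tf.concat(axis-first) API). In TensorFlow 1.x the same result is a one-liner; a sketch, not part of the original project:

import tensorflow as tf

labels = tf.constant([0, 2, 1])
onehot = tf.one_hot(labels, depth=3, on_value=1.0, off_value=0.0)
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]]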
def create_net(self, shape):
    print("Create Net")
    self.x = tf.placeholder(shape=[None, shape], name="x", dtype=tf.float32)
    self.y = tf.placeholder(shape=[None], name="y", dtype=tf.float32)

    out = layers.fully_connected(self.x,
                                 num_outputs=5,
                                 activation_fn=tf.nn.relu,
                                 weights_initializer=tf.contrib.layers.xavier_initializer())
    out = layers.fully_connected(out,
                                 num_outputs=3,
                                 activation_fn=tf.nn.relu,
                                 weights_initializer=tf.contrib.layers.xavier_initializer())
    self.net = layers.fully_connected(out,
                                      num_outputs=1,
                                      activation_fn=None,
                                      weights_initializer=tf.contrib.layers.xavier_initializer())

    self.net = tf.reshape(self.net, (-1,))
    l2 = (self.net - self.y) * (self.net - self.y)
    self.train = tf.train.AdamOptimizer(1e-4).minimize(l2)
    tf.global_variables_initializer().run()
def __init__(self, shape, dtype=tf.float32, name=None):
    """Creates a placeholder for a batch of tensors of a given shape and dtype.

    Parameters
    ----------
    shape: [int]
        shape of a single element of the batch
    dtype: tf.dtype
        number representation used for tensor contents
    name: str
        name of the underlying placeholder
    """
    print("C1")
    super(BatchInput, self).__init__(tf.placeholder(dtype, [None] + list(shape), name=name))
def normc_initializer(std=1.0):
    def _initializer(shape, dtype=None, partition_info=None):  # pylint: disable=W0613
        out = np.random.randn(*shape).astype(np.float32)
        out *= std / np.sqrt(np.square(out).sum(axis=0, keepdims=True))
        return tf.constant(out)
    return _initializer
def conv2d(x, num_filters, name, filter_size=(3, 3), stride=(1, 1), pad="SAME",
           dtype=tf.float32, collections=None, summary_tag=None):
    with tf.variable_scope(name):
        stride_shape = [1, stride[0], stride[1], 1]
        filter_shape = [filter_size[0], filter_size[1], int(x.get_shape()[3]), num_filters]

        # there are "num input feature maps * filter height * filter width"
        # inputs to each hidden unit
        fan_in = intprod(filter_shape[:3])
        # each unit in the lower layer receives a gradient from:
        # "num output feature maps * filter height * filter width" / pooling size
        fan_out = intprod(filter_shape[:2]) * num_filters
        # initialize weights with random weights
        w_bound = np.sqrt(6. / (fan_in + fan_out))

        w = tf.get_variable("W", filter_shape, dtype,
                            tf.random_uniform_initializer(-w_bound, w_bound),
                            collections=collections)
        b = tf.get_variable("b", [1, 1, 1, num_filters],
                            initializer=tf.zeros_initializer(),
                            collections=collections)

        if summary_tag is not None:
            # tf.summary.image takes max_outputs; max_images was the keyword of
            # the pre-1.0 tf.image_summary op
            tf.summary.image(summary_tag,
                             tf.transpose(tf.reshape(w, [filter_size[0], filter_size[1], -1, 1]),
                                          [2, 0, 1, 3]),
                             max_outputs=10)

        return tf.nn.conv2d(x, w, stride_shape, pad) + b
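A brief usage sketch (assuming TensorFlow 1.x; intprod is a helper from the snippet's project that is not shown here, so its definition below is an assumption):

import numpy as np
import tensorflow as tf

def intprod(x):
    # assumed helper: product of an int sequence, as used by conv2d above
    return int(np.prod(x))

images = tf.placeholder(tf.float32, [None, 84, 84, 4])
h = conv2d(images, num_filters=16, name="conv1",
           filter_size=(8, 8), stride=(4, 4), summary_tag="conv1/W")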
def __init__(self, var_list, dtype=tf.float32):
    shapes = list(map(var_shape, var_list))
    total_size = np.sum([intprod(shape) for shape in shapes])

    self.theta = theta = tf.placeholder(dtype, [total_size])
    start = 0
    assigns = []
    for (shape, v) in zip(shapes, var_list):
        size = intprod(shape)
        assigns.append(tf.assign(v, tf.reshape(theta[start:start + size], shape)))
        start += size
    self.op = tf.group(*assigns)
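This is the classic set-from-flat pattern: write a single flat parameter vector back into a list of variables. A sketch of how it might be driven, assuming the enclosing class is named SetFromFlat and that var_shape returns a variable's static shape as a list of ints (both assumptions about the surrounding project; intprod as in the previous sketch):

import numpy as np
import tensorflow as tf

def var_shape(v):
    # assumed helper: static shape of a variable as a list of ints
    return [int(d) for d in v.get_shape()]

v1 = tf.Variable(tf.zeros([2, 3]))
v2 = tf.Variable(tf.zeros([4]))
setter = SetFromFlat([v1, v2])  # hypothetical class name

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    flat = np.arange(10, dtype=np.float32)  # 2*3 + 4 = 10 values
    sess.run(setter.op, feed_dict={setter.theta: flat})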
def __init__(self, dtype):
    self._dtype = tf.as_dtype(dtype)
def dtype(self):
    return self._dtype

# Serialize the tf.dtype as a string so that it can be unpickled on DataFlow.
def __init__(self, dtype):
    super(FloatDomain, self).__init__(dtype)
    if not self.dtype.is_floating:
        raise ValueError(
            'FloatDomain must be initialized with a floating-point dtype.')
def vocabulary_file(self):
    return self._vocabulary_file

# Serialize the tf.dtype as a string so that it can be unpickled on DataFlow.
def __setstate__(self, state):
    self._dtype = tf.as_dtype(state['dtype'])
    self._is_categorical = state['is_categorical']
    self._min_value = state['min_value']
    self._max_value = state['max_value']
    self._vocabulary_file = state['vocabulary_file']
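This __setstate__ pairs with a __getstate__ that is not part of this extract; the recurring comment about serializing the tf.dtype as a string, together with the tf.as_dtype round-trip above, suggests something like the following sketch (an assumption, not the project's actual code):

def __getstate__(self):
    return {
        'dtype': self._dtype.name,  # serialize tf.DType as its string name
        'is_categorical': self._is_categorical,
        'min_value': self._min_value,
        'max_value': self._max_value,
        'vocabulary_file': self._vocabulary_file,
    }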
def __init__(self, dtype):
    super(StringDomain, self).__init__(dtype)
    if self.dtype != tf.string:
        raise ValueError('StringDomain must be initialized with a string dtype.')
def __init__(self, dtype):
    super(BoolDomain, self).__init__(dtype)
    if self.dtype != tf.bool:
        raise ValueError('BoolDomain must be initialized with a boolean dtype.')
def _dtype_to_domain(dtype):
    """Create an appropriate Domain for the given dtype."""
    if dtype.is_integer:
        return IntDomain(dtype)
    if dtype.is_floating:
        return FloatDomain(dtype)
    if dtype == tf.string:
        return StringDomain(dtype)
    if dtype == tf.bool:
        return BoolDomain(dtype)
    raise ValueError('Schema cannot accommodate dtype: {}'.format(dtype))
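A quick sketch of the dispatch, assuming the Domain classes from the preceding snippets are in scope:

import tensorflow as tf

print(type(_dtype_to_domain(tf.as_dtype('int64'))).__name__)    # IntDomain
print(type(_dtype_to_domain(tf.as_dtype('float32'))).__name__)  # FloatDomain
print(type(_dtype_to_domain(tf.string)).__name__)               # StringDomain
print(type(_dtype_to_domain(tf.bool)).__name__)                 # BoolDomain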
def as_feature_spec(self, column):
    if not column.is_fixed_size():
        raise ValueError('A column of unknown size cannot be represented as '
                         'fixed-size.')
    if column.domain.dtype not in _TF_EXAMPLE_ALLOWED_TYPES:
        raise ValueError('tf.Example parser supports only types {}, so it is '
                         'invalid to generate a feature_spec with type '
                         '{}.'.format(
                             _TF_EXAMPLE_ALLOWED_TYPES,
                             repr(column.domain.dtype)))
    return tf.FixedLenFeature(column.tf_shape().as_list(),
                              column.domain.dtype,
                              self.default_value)
def as_feature_spec(self, column):
    if column.domain.dtype not in _TF_EXAMPLE_ALLOWED_TYPES:
        raise ValueError('tf.Example parser supports only types {}, so it is '
                         'invalid to generate a feature_spec with type '
                         '{}.'.format(
                             _TF_EXAMPLE_ALLOWED_TYPES,
                             repr(column.domain.dtype)))
    return tf.VarLenFeature(column.domain.dtype)
def as_batched_placeholder(self, column):
    return tf.sparse_placeholder(
        column.domain.dtype, [None] + column.tf_shape().as_list())
def _from_parse_feature(parse_feature):
    """Convert a single feature spec to a ColumnSchema."""
    # FixedLenFeature
    if isinstance(parse_feature, tf.FixedLenFeature):
        representation = FixedColumnRepresentation(parse_feature.default_value)
        return ColumnSchema(parse_feature.dtype, parse_feature.shape, representation)

    # FixedLenSequenceFeature
    if isinstance(parse_feature, tf.FixedLenSequenceFeature):
        raise ValueError('DatasetSchema does not support '
                         'FixedLenSequenceFeature yet.')

    # VarLenFeature
    if isinstance(parse_feature, tf.VarLenFeature):
        representation = ListColumnRepresentation()
        return ColumnSchema(parse_feature.dtype, [None], representation)

    # SparseFeature
    if isinstance(parse_feature, tf.SparseFeature):
        index_field = SparseIndexField(name=parse_feature.index_key,
                                       is_sorted=parse_feature.already_sorted)
        representation = SparseColumnRepresentation(
            value_field_name=parse_feature.value_key,
            index_fields=[index_field])
        return ColumnSchema(parse_feature.dtype, [parse_feature.size], representation)

    raise ValueError('Cannot interpret feature spec {} with type {}'.format(
        parse_feature, type(parse_feature)))
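For context, the feature specs this converter consumes are the standard tf.Example parsing specs; the keys and values below are illustrative only:

import tensorflow as tf

feature_spec = {
    'age': tf.FixedLenFeature([], tf.int64, default_value=0),
    'tags': tf.VarLenFeature(tf.string),
    'scores': tf.SparseFeature(index_key='idx', value_key='val',
                               dtype=tf.float32, size=10),
}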
def assert_valid_dtypes(tensors):
    """Asserts tensors are all valid types (see `_valid_dtypes`).

    Args:
        tensors: Tensors to check.
    Raises:
        ValueError: If any tensor is not a valid type.
    """
    valid_dtype = valid_dtypes()
    for t in tensors:
        dtype = t.dtype.base_dtype
        if dtype not in valid_dtype:
            raise ValueError("Invalid type %r for %s, expected: %s." %
                             (dtype, t.name, [v for v in valid_dtype]))
def constant_value(value_or_tensor_or_var, dtype=None):
    """Returns value if value_or_tensor_or_var has a constant value.

    Args:
        value_or_tensor_or_var: A value, a `Tensor` or a `Variable`.
        dtype: Optional `tf.dtype`; if set, checks that the input has the
            right dtype.

    Returns:
        The constant value, or None if it is not constant.

    Raises:
        ValueError: if value_or_tensor_or_var is None or the tensor_variable
        has the wrong dtype.
    """
    if value_or_tensor_or_var is None:
        raise ValueError('value_or_tensor_or_var cannot be None')
    value = value_or_tensor_or_var
    if isinstance(value_or_tensor_or_var, (ops.Tensor, variables.Variable)):
        if dtype and value_or_tensor_or_var.dtype != dtype:
            raise ValueError('It has the wrong type %s instead of %s' %
                             (value_or_tensor_or_var.dtype, dtype))
        if isinstance(value_or_tensor_or_var, variables.Variable):
            value = None
        else:
            value = tensor_util.constant_value(value_or_tensor_or_var)
    return value
def _bbox_to_mask_fixed_size(yy, region_size, output_size, dtype):
    mask = _bbox_to_mask(yy, region_size, dtype)

    nonzero_region = tf.greater(tf.reduce_prod(tf.shape(mask)), 0)
    mask = tf.cond(nonzero_region, lambda: mask, lambda: tf.zeros(output_size, dtype))
    mask = tf.image.resize_images(mask[..., tf.newaxis], output_size)[..., 0]
    return mask
def get_dtype(dtype):
    """A helper function to get tf.dtype from str.

    :param dtype: a str, e.g. "int32"
    :return: corresponding tf.dtype
    """
    assert isinstance(dtype, str)
    if dtype in __str2dtype:
        return __str2dtype[dtype]
    return tf.int32
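The module-level __str2dtype table is not included in the snippet; a plausible reconstruction (an assumption, not the project's actual mapping) would be:

import tensorflow as tf

# assumed mapping; the real table may cover more or fewer types
__str2dtype = {
    'int32': tf.int32,
    'int64': tf.int64,
    'float32': tf.float32,
    'float64': tf.float64,
    'string': tf.string,
    'bool': tf.bool,
}

Note that tf.as_dtype('float32') performs the same lookup for standard type names but raises on unknown strings, whereas this helper silently falls back to tf.int32.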
def pad_sequences(sequences, maxlen=None, dtype='int32', padding='post',
                  truncating='post', value=0.):
    """pad_sequences.

    Pad each sequence to the same length: the length of the longest sequence.
    If maxlen is provided, any sequence longer than maxlen is truncated to
    maxlen. Truncation happens off either the beginning or the end (default)
    of the sequence. Supports pre-padding and post-padding (default).

    Args:
        sequences: list of lists where each element is a sequence.
        maxlen: an `int`, maximum length.
        dtype: type to cast the resulting sequence.
        padding: 'pre' or 'post', pad either before or after each sequence.
        truncating: 'pre' or 'post', remove values from sequences larger than
            maxlen either at the beginning or at the end of the sequence.
        value: `float`, value used to pad the sequences.

    Returns:
        x: `numpy array` with dimensions (number_of_sequences, maxlen)
    """
    lengths = [len(s) for s in sequences]

    nb_samples = len(sequences)
    if maxlen is None:
        maxlen = np.max(lengths)

    x = (np.ones((nb_samples, maxlen)) * value).astype(dtype)
    for idx, s in enumerate(sequences):
        if len(s) == 0:
            continue  # empty list was found
        if truncating == 'pre':
            trunc = s[-maxlen:]
        elif truncating == 'post':
            trunc = s[:maxlen]
        else:
            raise ValueError("Truncating type '%s' not understood" % truncating)

        if padding == 'post':
            x[idx, :len(trunc)] = trunc
        elif padding == 'pre':
            x[idx, -len(trunc):] = trunc
        else:
            raise ValueError("Padding type '%s' not understood" % padding)
    return x
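A quick usage check of pad_sequences (pure NumPy, runnable as-is):

import numpy as np

seqs = [[1, 2, 3], [4, 5], [6]]
print(pad_sequences(seqs))
# [[1 2 3]
#  [4 5 0]
#  [6 0 0]]
print(pad_sequences(seqs, maxlen=2, truncating='pre', padding='pre'))
# [[2 3]
#  [4 5]
#  [0 6]]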