The following 50 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.python.framework.dtypes.as_dtype().
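Before the project examples, here is a minimal sketch of the behavior being exercised: as_dtype() normalizes a dtype-like value (a Python or numpy type, a string, or an existing DType) into a tf.DType object, and .base_dtype strips any reference variant. The snippet below is illustrative only and uses just the public behavior that the examples that follow rely on.

import numpy as np
from tensorflow.python.framework import dtypes

# Strings, numpy dtypes, and DType objects all map to the same tf.DType.
assert dtypes.as_dtype('float32') == dtypes.float32
assert dtypes.as_dtype(np.float64) == dtypes.float64
assert dtypes.as_dtype(dtypes.int32) is dtypes.int32

# base_dtype is what the DataSet constructors below compare against
# dtypes.uint8 / dtypes.float32 after normalizing the caller's argument.
assert dtypes.as_dtype(np.uint8).base_dtype == dtypes.uint8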
def __init__(self, images, labels, dtype=dtypes.float32, reshape=True):
    dtype = dtypes.as_dtype(dtype).base_dtype
    if dtype not in (dtypes.uint8, dtypes.float32):
        raise TypeError('Invalid image dtype %r, expected uint8 or float32' %
                        dtype)
    self._num_examples = images.shape[0]
    if dtype == dtypes.float32:
        # Convert from [0, 255] -> [0.0, 1.0].
        images = images.astype(np.float32)
        images = np.multiply(images, 1.0 / 255.0)
    self._images = images
    self._labels = labels
    self._epochs_completed = 0
    self._index_in_epoch = 0
def input_builder(self):
    """Builds inputs in the graph.

    Returns:
        Two placeholders for inputs and outputs.
    """
    input_shape = [None] + self.input_shape[1:]
    self._input_placeholder = array_ops.placeholder(
        dtypes.as_dtype(self._input_dtype), input_shape, name='input')
    if self.output_shape is None:
        self._output_placeholder = None
    else:
        output_shape = [None] + self.output_shape[1:]
        self._output_placeholder = array_ops.placeholder(
            dtypes.as_dtype(self._output_dtype), output_shape, name='output')
    return self._input_placeholder, self._output_placeholder
def __init__(self, shape, dtype, verify_pd=True, name="OperatorPDIdentity"):
    """Initialize an `OperatorPDIdentity`.

    Args:
        shape: `int32` rank 1 `Tensor` of length at least 2, and with the last
            two entries equal (since this is a square matrix).
        dtype: Data type of the matrix that this operator represents.
        verify_pd: `Boolean`, if `True`, asserts are added to the
            initialization args to ensure they define this operator as a
            square (batch) matrix.
        name: Name to prepend to `Ops`.
    """
    # Grab static shape if available now.
    with ops.name_scope(name):
        with ops.name_scope("init", values=[shape]):
            self._dtype = dtypes.as_dtype(dtype)
            self._verify_pd = verify_pd
            self._name = name

            # Store the static shape (if possible) right now before adding the
            # asserts, since the asserts prevent .constant_value from working.
            shape = ops.convert_to_tensor(shape, name="shape")
            self._get_shape = tensor_shape.TensorShape(
                tensor_util.constant_value(shape))
            self._shape_arg = self._check_shape(shape)
def constant(self, values, dtype=None, shape=None, name="Const"):
    """Imperative specific implementation of constant-op."""

    np_dtype = None

    # Convert numpy dtype to TensorFlow dtype if needed
    if dtype:
        try:
            dtype = dtypes.as_dtype(dtype)
            np_dtype = dtype.as_numpy_dtype
        except TypeError as exc:
            raise TypeError("Trying to create constant with dtype=%s, "
                            "got TypeError(%s)" % (dtype, exc.message))

    # Native TensorFlow has special handling for TensorProto initialized with
    # a scalar and non-empty shape. For feature parity with TensorFlow we
    # handle this case by tiling the constant explicitly.
    if isinstance(values, numbers.Number) and shape:
        data_array = values * np.ones(shape=shape, dtype=np_dtype)
        return self.numpy_to_itensor(data_array, dtype=dtype, shape=shape)

    return self.numpy_to_itensor(values, dtype, shape)
def __init__(self, documents, labels, dtype=dtypes.float32, seed=None):
    seed1, seed2 = random_seed.get_seed(seed)
    np.random.seed(seed1 if seed is None else seed2)
    dtype = dtypes.as_dtype(dtype).base_dtype
    if dtype not in (dtypes.uint8, dtypes.float32):
        raise TypeError('Invalid dtype %r, expected uint8 or float32' % dtype)
    assert documents.shape[0] == labels.shape[0], (
        'documents.shape: %s labels.shape: %s' %
        (documents.shape, labels.shape))
    self._num_examples = documents.shape[0]
    self._documents = documents
    self._labels = labels
    self._epochs_completed = 0
    self._index_in_epoch = 0
def random_positive_definite_matrix(shape, dtype,
                                    force_well_conditioned=False):
    """[batch] positive definite matrix.

    Args:
        shape: `TensorShape` or Python list. Shape of the returned matrix.
        dtype: `TensorFlow` `dtype` or Python dtype.
        force_well_conditioned: Python bool. If `True`, returned matrix has
            eigenvalues with modulus in `(1, 4)`. Otherwise, eigenvalues are
            chi-squared random variables.

    Returns:
        `Tensor` with desired shape and dtype.
    """
    dtype = dtypes.as_dtype(dtype)
    if not contrib_tensor_util.is_tensor(shape):
        shape = tensor_shape.TensorShape(shape)
        # Matrix must be square.
        shape[-1].assert_is_compatible_with(shape[-2])

    with ops.name_scope("random_positive_definite_matrix"):
        tril = random_tril_matrix(
            shape, dtype, force_well_conditioned=force_well_conditioned)
        return math_ops.matmul(tril, tril, adjoint_b=True)
def _AssertDynamicStitchResultIs(self, indices, data, expected):
    with self.test_session() as session:
        index_placeholders = [
            array_ops.placeholder(dtypes.as_dtype(arg.dtype))
            for arg in indices
        ]
        data_placeholders = [
            array_ops.placeholder(dtypes.as_dtype(arg.dtype)) for arg in data
        ]
        with self.test_scope():
            output = data_flow_ops.dynamic_stitch(index_placeholders,
                                                  data_placeholders)

        feed_dict = {}
        for placeholder, value in zip(index_placeholders, indices):
            feed_dict[placeholder] = value
        for placeholder, value in zip(data_placeholders, data):
            feed_dict[placeholder] = value
        result = session.run(output, feed_dict=feed_dict)
        self.assertAllClose(expected, result, rtol=1e-3)
def __init__(self, images, labels, start_id=0, fake_data=False,
             one_hot=False, dtype=dtypes.float32):
    """Construct a DataSet.

    one_hot arg is used only if fake_data is true. `dtype` can be either
    `uint8` to leave the input as `[0, 255]`, or `float32` to rescale into
    `[0, 1]`.
    """
    dtype = dtypes.as_dtype(dtype).base_dtype
    if dtype not in (dtypes.uint8, dtypes.float32):
        raise TypeError('Invalid image dtype %r, expected uint8 or float32' %
                        dtype)
    if fake_data:
        self._num_examples = 10000
        self.one_hot = one_hot
    else:
        assert images.shape[0] == labels.shape[0], (
            'images.shape: %s labels.shape: %s' % (images.shape, labels.shape))
        self._num_examples = images.shape[0]

        # Convert shape from [num examples, rows, columns, depth]
        # to [num examples, rows*columns] (assuming depth == 1)
        assert images.shape[3] == 1
        images = images.reshape(images.shape[0],
                                images.shape[1] * images.shape[2])
        if dtype == dtypes.float32:
            # Convert from [0, 255] -> [0.0, 1.0].
            images = images.astype(numpy.float32)
            images = numpy.multiply(images, 1.0 / 255.0)
    self._ids = numpy.arange(start_id, start_id + self._num_examples)
    self._images = images
    self._labels = labels
    self._epochs_completed = 0
    self._index_in_epoch = 0
def convert_to_tensor_or_sparse_tensor(value, dtype=None, name=None,
                                       as_ref=False):
    """Converts value to a `SparseTensor` or `Tensor`.

    Args:
        value: A `SparseTensor`, `SparseTensorValue`, or an object whose type
            has a registered `Tensor` conversion function.
        dtype: Optional element type for the returned tensor. If missing, the
            type is inferred from the type of `value`.
        name: Optional name to use if a new `Tensor` is created.
        as_ref: True if we want the result as a ref tensor. Only used if a new
            `Tensor` is created.

    Returns:
        A `SparseTensor` or `Tensor` based on `value`.

    Raises:
        RuntimeError: If result type is incompatible with `dtype`.
    """
    if dtype is not None:
        dtype = dtypes.as_dtype(dtype)
    if isinstance(value, ops.SparseTensorValue):
        value = ops.SparseTensor.from_value(value)
    if isinstance(value, ops.SparseTensor):
        if dtype and not dtype.is_compatible_with(value.dtype):
            raise RuntimeError(
                'Sparse dtype: requested = %s, actual = %s' %
                (dtype.name, value.dtype.name))
        return value
    return ops.convert_to_tensor(value, dtype=dtype, name=name, as_ref=as_ref)
def __init__(self, key_dtype, value_dtype, name):
    """Construct a lookup table interface.

    Args:
        key_dtype: The table key type.
        value_dtype: The table value type.
        name: A name for the operation (optional).
    """
    self._key_dtype = dtypes.as_dtype(key_dtype)
    self._value_dtype = dtypes.as_dtype(value_dtype)
    self._name = name
def __init__(self, key_dtype, value_dtype):
    """Construct a table initializer object.

    Args:
        key_dtype: Type of the table keys.
        value_dtype: Type of the table values.
    """
    self._key_dtype = dtypes.as_dtype(key_dtype)
    self._value_dtype = dtypes.as_dtype(value_dtype)
def _check_dtype(dtype):
    if dtypes.as_dtype(dtype) == dtypes.float64:
        logging.warn('float64 is not supported by many models, '
                     'consider casting to float32.')
    return dtype
def convert_to_tensor_or_sparse_tensor(value, dtype=None, name=None,
                                       as_ref=False):
    """Converts value to a `SparseTensor` or `Tensor`.

    Args:
        value: A `SparseTensor`, `SparseTensorValue`, or an object whose type
            has a registered `Tensor` conversion function.
        dtype: Optional element type for the returned tensor. If missing, the
            type is inferred from the type of `value`.
        name: Optional name to use if a new `Tensor` is created.
        as_ref: True if we want the result as a ref tensor. Only used if a new
            `Tensor` is created.

    Returns:
        A `SparseTensor` or `Tensor` based on `value`.

    Raises:
        RuntimeError: If result type is incompatible with `dtype`.
    """
    if dtype is not None:
        dtype = dtypes.as_dtype(dtype)
    if isinstance(value, sparse_tensor.SparseTensorValue):
        value = sparse_tensor.SparseTensor.from_value(value)
    if isinstance(value, sparse_tensor.SparseTensor):
        if dtype and not dtype.is_compatible_with(value.dtype):
            raise RuntimeError(
                'Sparse dtype: requested = %s, actual = %s' %
                (dtype.name, value.dtype.name))
        return value
    return ops.convert_to_tensor(value, dtype=dtype, name=name, as_ref=as_ref)
def __init__(self, images, labels, fake_data=False, one_hot=False,
             dtype=dtypes.float32, reshape=True):
    """Construct a DataSet.

    one_hot arg is used only if fake_data is true. `dtype` can be either
    `uint8` to leave the input as `[0, 255]`, or `float32` to rescale into
    `[0, 1]`.
    """
    dtype = dtypes.as_dtype(dtype).base_dtype
    if dtype not in (dtypes.uint8, dtypes.float32):
        raise TypeError('Invalid image dtype %r, expected uint8 or float32' %
                        dtype)
    if fake_data:
        self._num_examples = 10000
        self.one_hot = one_hot
    else:
        assert images.shape[0] == labels.shape[0], (
            'images.shape: %s labels.shape: %s' % (images.shape, labels.shape))
        self._num_examples = images.shape[0]

        # Convert shape from [num examples, rows, columns, depth]
        # to [num examples, rows*columns] (assuming depth == 1)
        if reshape:
            assert images.shape[3] == 1
            images = images.reshape(images.shape[0],
                                    images.shape[1] * images.shape[2])
        if dtype == dtypes.float32:
            # Convert from [0, 255] -> [0.0, 1.0].
            images = images.astype(numpy.float32)
            images = numpy.multiply(images, 1.0 / 255.0)
    self._images = images
    self._labels = labels
    self._epochs_completed = 0
    self._index_in_epoch = 0
def should_quantize_const(self, node):
    if not self.state.output_node_stack:
        return False
    top = self.state.output_node_stack[-1]
    if not top[2]:
        return False
    dtype = dtypes.as_dtype(node.attr["dtype"].type)
    assert dtype == dtypes.float32, (
        "Failed to quantize constant %s of type %s" % (node.name, dtype))
    return True
def __init__(self, images, labels, fake_data=False, one_hot=False,
             dtype=dtypes.float32, reshape=True):
    """Construct a DataSet.

    one_hot arg is used only if fake_data is true. `dtype` can be either
    `uint8` to leave the input as `[0, 255]`, or `float32` to rescale into
    `[0, 1]`.
    """
    dtype = dtypes.as_dtype(dtype).base_dtype
    if dtype not in (dtypes.uint8, dtypes.float32):
        raise TypeError(('Invalid image dtype %r, expected uint8 or '
                         'float32') % dtype)
    if fake_data:
        self._num_examples = 10000
        self.one_hot = one_hot
    else:
        assert images.shape[0] == labels.shape[0], (
            'images.shape: %s labels.shape: %s' % (images.shape, labels.shape))
        self._num_examples = images.shape[0]

        # Convert shape from [num examples, rows, columns, depth]
        # to [num examples, rows*columns] (assuming depth == 1)
        if reshape:
            assert images.shape[3] == 1
            images = images.reshape(images.shape[0],
                                    images.shape[1] * images.shape[2])
        if dtype == dtypes.float32:
            # Convert from [0, 255] -> [0.0, 1.0].
            images = images.astype(np.float32)
            images = np.multiply(images, 1.0 / 255.0)
    self._images = images
    self._labels = labels
    self._epochs_completed = 0
    self._index_in_epoch = 0
def numpy_to_handle(self, array):
    """Upload numpy array into TensorFlow runtime.

    Args:
        array: numpy array to convert to TensorHandle

    Returns:
        TensorHandle corresponding to given numpy array.
    """
    tf_dtype = dtypes.as_dtype(array.dtype)
    current_device = get_current_device_string(self.g)
    current_device_sanitized = current_device.replace(":", "")
    key = ("numpy_to_handle", tf_dtype.name, current_device)

    if key in self.op_cache:
        holder, handle_op = self.op_cache[key]
    else:
        if self.PRINT_CACHE_MISSES:
            print("Imperative cache miss for %s" % (str(key)))

        op_prefix = "numpy_to_handle.%s.%s" % (tf_dtype.name,
                                               current_device_sanitized)
        with self.g.as_default():
            holder = array_ops.placeholder(dtype=array.dtype,
                                           name=op_prefix + ".holder")
            handle_op = session_ops.get_session_handle(
                holder, name=op_prefix + ".handle")
        self.op_cache[key] = (holder, handle_op)

    handle = self.run(handle_op, feed_dict={holder: array})
    return handle
def __init__(self, images, labels, fake_data=False, one_hot=False,
             dtype=dtypes.float32, reshape=True):
    """Construct a DataSet.

    one_hot arg is used only if fake_data is true. `dtype` can be either
    `uint8` to leave the input as `[0, 255]`, or `float32` to rescale into
    `[0, 1]`.
    """
    dtype = dtypes.as_dtype(dtype).base_dtype
    if dtype not in (dtypes.uint8, dtypes.float32):
        raise TypeError('Invalid image dtype %r, expected uint8 or float32' %
                        dtype)
    if fake_data:
        self._num_examples = 10000
        self.one_hot = one_hot
    else:
        assert images.shape[0] == labels.shape[0], (
            'images.shape: %s labels.shape: %s' % (images.shape, labels.shape))
        self._num_examples = images.shape[0]

        # Convert shape from [num examples, rows, columns, depth]
        # to [num examples, rows*columns] (assuming depth == 1)
        if reshape:
            assert images.shape[3] == 1
            images = images.reshape(images.shape[0],
                                    images.shape[1] * images.shape[2])
    self._images = images
    self._labels = labels
    self._epochs_completed = 0
    self._index_in_epoch = 0
def __init__(self, images, labels, fake_data=False, one_hot=False):  # , dtype=dtypes.float32):
    # dtype = dtypes.as_dtype(dtype).base_dtype
    if fake_data:
        self._num_examples = 10000
    else:
        assert images.shape[0] == labels.shape[0], (
            "images.shape: %s labels.shape: %s" % (images.shape, labels.shape))
        self._num_examples = images.shape[0]

        # Convert shape from [num examples, rows, columns, depth]
        # to [num examples, rows*columns*depth] (assuming depth == 3)
        # assert images.shape[3] == 1
        # images = images.reshape(images.shape[0],
        #                         images.shape[1] * images.shape[2] * images.shape[3])

        # Convert from [0, 255] -> [0.0, 1.0].
        images = np.multiply(images, 1.0 / 255.0)
        images -= np.mean(images)
        images = images.astype(np.float32)

        # Normalize labels to values [0, 1] for binary classification.
        labels = np.multiply(labels, 1.0 / 255.0)
        labels = labels.astype(np.float32)
    self._images = images
    self._labels = labels
    self._epochs_completed = 0
    self._index_in_epoch = 0
def _compute_gradient(x, x_shape, dx, y, y_shape, dy, x_init_value=None,
                      delta=1e-3, feed_dict=None, prep_fn=None, limit=0):
    """Computes the theoretical and numerical jacobian."""
    t = dtypes.as_dtype(x.dtype)
    allowed_types = [dtypes.float16, dtypes.float32, dtypes.float64,
                     dtypes.complex64, dtypes.complex128]
    assert t.base_dtype in allowed_types, "Don't support type %s for x" % t.name
    t2 = dtypes.as_dtype(y.dtype)
    assert t2.base_dtype in allowed_types, "Don't support type %s for y" % t2.name

    if x_init_value is not None:
        i_shape = list(x_init_value.shape)
        assert list(x_shape) == i_shape, (
            "x_shape = %s, init_data shape = %s" % (x_shape, i_shape))
        x_data = x_init_value
    else:
        if t == dtypes.float16:
            dtype = np.float16
        elif t == dtypes.float32:
            dtype = np.float32
        else:
            dtype = np.float64
        x_data = np.asfarray(np.random.random_sample(x_shape), dtype=dtype)

    print("\ttheoretical jacobian..")
    jacob_t = _compute_theoretical_jacobian(x, x_shape, x_data, dy, y_shape,
                                            dx, feed_dict, prep_fn=prep_fn)
    print("\tnumeric jacobian..")
    jacob_n = _compute_numeric_jacobian(x, x_shape, x_data, y, y_shape, delta,
                                        feed_dict, prep_fn=prep_fn, limit=limit)
    return jacob_t, jacob_n
def __init__(self, images, labels, fake_data=False, one_hot=False,
             dtype=dtypes.float32, reshape=False):
    """Construct a DataSet.

    one_hot arg is used only if fake_data is true. `dtype` can be either
    `uint8` to leave the input as `[0, 255]`, or `float32` to rescale into
    `[0, 1]`.
    """
    dtype = dtypes.as_dtype(dtype).base_dtype
    if dtype not in (dtypes.uint8, dtypes.float32):
        raise TypeError('Invalid image dtype %r, expected uint8 or float32' %
                        dtype)
    if fake_data:
        self._num_examples = 10000
        self.one_hot = one_hot
    else:
        assert images.shape[0] == labels.shape[0], (
            'images.shape: %s labels.shape: %s' % (images.shape, labels.shape))
        self._num_examples = images.shape[0]

        # Convert shape from [num examples, rows, columns, depth]
        # to [num examples, rows*columns] (assuming depth == 1)
        if reshape:
            assert images.shape[3] == 1
            images = images.reshape(images.shape[0],
                                    images.shape[1] * images.shape[2])
        if dtype == dtypes.float32:
            # Convert from [0, 255] -> [0.0, 1.0].
            images = images.astype(numpy.float32)
            images = numpy.multiply(images, 1.0 / 255.0)
    self._images = images
    self._labels = labels
def __init__(self, images, labels, dtype=dtypes.float32, reshape=True):
    """Construct a DataSet.

    `dtype` can be either `uint8` to leave the input as `[0, 255]`, or
    `float32` to rescale into `[0, 1]`.
    """
    dtype = dtypes.as_dtype(dtype).base_dtype
    if dtype not in (dtypes.uint8, dtypes.float32):
        raise TypeError('Invalid image dtype %r, expected uint8 or float32' %
                        dtype)
    assert images.shape[0] == labels.shape[0], (
        'images.shape: %s labels.shape: %s' % (images.shape, labels.shape))
    self._num_examples = images.shape[0]

    # Convert shape from [num examples, rows, columns, depth]
    # to [num examples, rows*columns] (assuming depth == 1)
    if reshape:
        # assert images.shape[3] == 1
        images = images.reshape(images.shape[0],
                                images.shape[1] * images.shape[2])
    if dtype == dtypes.float32:
        # Convert from [0, 255] -> [0.0, 1.0].
        images = images.astype(np.float32)
        # images = np.multiply(images, 1.0 / 255.0)
    self._images = images - 0.5
    self._labels = labels
    self._epochs_completed = 0
    self._index_in_epoch = 0
def __init__(self, images, labels, one_hot=False, dtype=dtypes.float32):
    """Construct a DataSet.

    `dtype` can be either `uint8` to leave the input as `[0, 255]`, or
    `float32` to rescale into `[0, 1]`.
    """
    dtype = dtypes.as_dtype(dtype).base_dtype
    if dtype not in (dtypes.uint8, dtypes.float32):
        raise TypeError('Invalid image dtype %r, expected uint8 or float32' %
                        dtype)
    assert images.shape[0] == labels.shape[0], (
        'images.shape: %s labels.shape: %s' % (images.shape, labels.shape))
    self._num_examples = images.shape[0]

    # Convert shape from [num examples, rows, columns, depth]
    # to [num examples, rows*columns] (assuming depth == 1)
    assert images.shape[3] == 1
    images = images.reshape(images.shape[0],
                            images.shape[1] * images.shape[2])
    if dtype == dtypes.float32:
        # Convert from [0, 255] -> [0.0, 1.0].
        images = images.astype(numpy.float32)
        images = numpy.multiply(images, 1.0 / 255.0)
    self._images = images
    self._labels = labels
    self._epochs_completed = 0
    self._index_in_epoch = 0
    self.perm = numpy.arange(self._num_examples)
    numpy.random.shuffle(self.perm)
    print('Number of examples: ' + str(self._num_examples))
def __init__(self, images, labels, fake_data=False, one_hot=False,
             dtype=dtypes.float32, reshape=True):
    dtype = dtypes.as_dtype(dtype).base_dtype
    if dtype not in (dtypes.uint8, dtypes.float32):
        raise TypeError('Invalid image dtype %r, expected uint8 or float32' %
                        dtype)
    if fake_data:
        self._num_examples = 10000
        self.one_hot = one_hot
    else:
        assert images.shape[0] == labels.shape[0], (
            'images.shape: %s labels.shape: %s' % (images.shape, labels.shape))
        self._num_examples = images.shape[0]

        # Convert shape from [num examples, rows, columns, depth]
        # to [num examples, rows*columns] (assuming depth == 1)
        if reshape:
            assert images.shape[3] == 1
            images = images.reshape(images.shape[0],
                                    images.shape[1] * images.shape[2])
        if dtype == dtypes.float32:
            # Convert from [0, 255] -> [0.0, 1.0].
            images = images.astype(np.float32)
            images = np.multiply(images, 1.0 / 255.0)
    self._images = images
    self._labels = labels
    self._epochs_completed = 0
    self._index_in_epoch = 0
def __init__(self, images, labels, one_hot=False, dtype=dtypes.float32,
             normalize=False):
    """Construct a DataSet.

    one_hot arg is used only if fake_data is true. `dtype` can be either
    `uint8` to leave the input as `[0, 255]`, or `float32` to rescale into
    `[0, 1]`.
    """
    dtype = dtypes.as_dtype(dtype).base_dtype
    if dtype not in (dtypes.uint8, dtypes.float32):
        raise TypeError('Invalid image dtype %r, expected uint8 or float32' %
                        dtype)
    assert images.shape[0] == labels.shape[0], (
        'images.shape: %s labels.shape: %s' % (images.shape, labels.shape))
    self._num_examples = images.shape[0]
    if dtype == dtypes.float32:
        # Convert from [0, 255] -> [0.0, 1.0].
        images = images.astype(np.float32)
        if normalize:
            images = np.multiply(images, 1.0 / 255.0)
    self._images = images
    self._labels = labels
    self._epochs_completed = 0
    self._index_in_epoch = 0
def __init__(self, images, labels, one_hot=False, dtype=dtypes.float32,
             normalize=True):
    """Construct a DataSet.

    one_hot arg is used only if fake_data is true. `dtype` can be either
    `uint8` to leave the input as `[0, 255]`, or `float32` to rescale into
    `[0, 1]`.
    """
    dtype = dtypes.as_dtype(dtype).base_dtype
    if dtype not in (dtypes.uint8, dtypes.float32):
        raise TypeError('Invalid image dtype %r, expected uint8 or float32' %
                        dtype)
    assert images.shape[0] == labels.shape[0], (
        'images.shape: %s labels.shape: %s' % (images.shape, labels.shape))
    self._num_examples = images.shape[0]
    if dtype == dtypes.float32:
        # Convert from [0, 255] -> [0.0, 1.0].
        images = images.astype(np.float32)
        if normalize:
            images = np.multiply(images, 1.0 / 255.0)
    self._images = images
    self._labels = labels
    self._epochs_completed = 0
    self._index_in_epoch = 0
def test_odeint_all_dtypes(self):
    func = lambda y, t: y
    t = np.linspace(0.0, 1.0, 11)
    for y0_dtype in [
        dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128
    ]:
        for t_dtype in [dtypes.float32, dtypes.float64]:
            y0 = math_ops.cast(1.0, y0_dtype)
            y_solved = odes.odeint(func, y0, math_ops.cast(t, t_dtype))
            with self.test_session() as sess:
                y_solved = sess.run(y_solved)
            expected = np.asarray(np.exp(t))
            self.assertAllClose(y_solved, expected, rtol=1e-5)
            self.assertEqual(dtypes.as_dtype(y_solved.dtype), y0_dtype)
def input_builder(self):
    """Builds inputs in the graph.

    Returns:
        Two placeholders for inputs and outputs.
    """

    def get_placeholder(shape, dtype, name_prepend):
        if shape is None:
            return None
        if isinstance(shape, dict):
            placeholder = {}
            for key in list(shape.keys()):
                placeholder[key] = array_ops.placeholder(
                    dtypes.as_dtype(dtype[key]), [None] + shape[key][1:],
                    name=name_prepend + '_' + key)
        else:
            placeholder = array_ops.placeholder(
                dtypes.as_dtype(dtype), [None] + shape[1:], name=name_prepend)
        return placeholder

    self._input_placeholder = get_placeholder(self.input_shape,
                                              self._input_dtype, 'input')
    self._output_placeholder = get_placeholder(self.output_shape,
                                               self._output_dtype, 'output')
    return self._input_placeholder, self._output_placeholder
def assertAC(self, x, y):
    """Derived classes can set _atol, _rtol to get different tolerance."""
    dtype = dtypes.as_dtype(x.dtype)
    atol = self._atol[dtype]
    rtol = self._rtol[dtype]
    self.assertAllClose(x, y, atol=atol, rtol=rtol)
def random_normal(shape, mean=0.0, stddev=1.0, dtype=dtypes.float32,
                  seed=None):
    """Tensor with (possibly complex) Gaussian entries.

    Samples are distributed like

        N(mean, stddev^2),                      if dtype is real,
        X + iY, where X, Y ~ N(mean, stddev^2), if dtype is complex.

    Args:
        shape: `TensorShape` or Python list. Shape of the returned tensor.
        mean: `Tensor` giving mean of normal to sample from.
        stddev: `Tensor` giving stdev of normal to sample from.
        dtype: `TensorFlow` `dtype` or numpy dtype.
        seed: Python integer seed for the RNG.

    Returns:
        `Tensor` with desired shape and dtype.
    """
    dtype = dtypes.as_dtype(dtype)
    with ops.name_scope("random_normal"):
        samples = random_ops.random_normal(
            shape, mean=mean, stddev=stddev, dtype=dtype.real_dtype, seed=seed)
        if dtype.is_complex:
            if seed is not None:
                seed += 1234
            more_samples = random_ops.random_normal(
                shape, mean=mean, stddev=stddev, dtype=dtype.real_dtype,
                seed=seed)
            samples = math_ops.complex(samples, more_samples)
        return samples
def random_uniform(shape, minval=None, maxval=None, dtype=dtypes.float32,
                   seed=None):
    """Tensor with (possibly complex) Uniform entries.

    Samples are distributed like

        Uniform[minval, maxval],                      if dtype is real,
        X + iY, where X, Y ~ Uniform[minval, maxval], if dtype is complex.

    Args:
        shape: `TensorShape` or Python list. Shape of the returned tensor.
        minval: `0-D` `Tensor` giving the minimum values.
        maxval: `0-D` `Tensor` giving the maximum values.
        dtype: `TensorFlow` `dtype` or Python dtype.
        seed: Python integer seed for the RNG.

    Returns:
        `Tensor` with desired shape and dtype.
    """
    dtype = dtypes.as_dtype(dtype)
    with ops.name_scope("random_uniform"):
        samples = random_ops.random_uniform(
            shape, dtype=dtype.real_dtype, minval=minval, maxval=maxval,
            seed=seed)
        if dtype.is_complex:
            if seed is not None:
                seed += 12345
            more_samples = random_ops.random_uniform(
                shape, dtype=dtype.real_dtype, minval=minval, maxval=maxval,
                seed=seed)
            samples = math_ops.complex(samples, more_samples)
        return samples
def random_sign_uniform(shape, minval=None, maxval=None, dtype=dtypes.float32,
                        seed=None):
    """Tensor with (possibly complex) random entries from a "sign Uniform".

    Letting `Z` be a random variable equal to `-1` and `1` with equal
    probability, samples from this `Op` are distributed like

        Z * X, where X ~ Uniform[minval, maxval],           if dtype is real,
        Z * (X + iY), where X, Y ~ Uniform[minval, maxval], if dtype is complex.

    Args:
        shape: `TensorShape` or Python list. Shape of the returned tensor.
        minval: `0-D` `Tensor` giving the minimum values.
        maxval: `0-D` `Tensor` giving the maximum values.
        dtype: `TensorFlow` `dtype` or Python dtype.
        seed: Python integer seed for the RNG.

    Returns:
        `Tensor` with desired shape and dtype.
    """
    dtype = dtypes.as_dtype(dtype)
    with ops.name_scope("random_sign_uniform"):
        unsigned_samples = random_uniform(
            shape, minval=minval, maxval=maxval, dtype=dtype, seed=seed)
        if seed is not None:
            seed += 12
        signs = math_ops.sign(
            random_ops.random_uniform(shape, minval=-1., maxval=1., seed=seed))
        return unsigned_samples * math_ops.cast(signs, unsigned_samples.dtype)
def _testNAry(self, op, args, expected):
    with self.test_session() as session:
        with self.test_scope():
            placeholders = [
                array_ops.placeholder(dtypes.as_dtype(arg.dtype), arg.shape)
                for arg in args
            ]
            feeds = {placeholders[i]: args[i] for i in range(0, len(args))}
            output = op(placeholders)
        result = session.run(output, feeds)
        self.assertAllClose(result, expected, rtol=1e-3)