The following 50 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.python.ops.math_ops.add().
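Before the project-specific examples, here is a minimal sketch of the call itself. This is our own illustration, not one of the extracted examples; it assumes a TensorFlow 1.x graph-mode environment where the internal tensorflow.python modules are importable, and the tensor names a, b, and c are hypothetical.

from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops

with ops.Graph().as_default():
  # math_ops.add is the op behind tf.add and the "+" operator on tensors;
  # it performs element-wise (broadcasting) addition.
  a = constant_op.constant([1.0, 2.0], name="a")
  b = constant_op.constant([3.0, 4.0], name="b")
  c = math_ops.add(a, b, name="c")
  with session.Session() as sess:
    print(sess.run(c))  # [4. 6.]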
def test_placeholder(self):
  """Test placeholder functionalities."""
  g0 = ops.Graph()
  with g0.as_default():
    a0 = constant_op.constant(1, name="foo")
  # Test placeholder name.
  self.assertEqual(ge.util.placeholder_name(a0), "geph__foo_0")
  self.assertEqual(ge.util.placeholder_name(None), "geph")
  self.assertEqual(
      ge.util.placeholder_name(
          a0, scope="foo/"), "foo/geph__foo_0")
  self.assertEqual(
      ge.util.placeholder_name(
          a0, scope="foo"), "foo/geph__foo_0")
  self.assertEqual(ge.util.placeholder_name(None, scope="foo/"), "foo/geph")
  self.assertEqual(ge.util.placeholder_name(None, scope="foo"), "foo/geph")
  # Test placeholder creation.
  g0 = ops.Graph()
  with g0.as_default():
    a0 = constant_op.constant(1, dtype=dtypes.float32, name="a0")
    c0 = math_ops.add(
        ge.util.make_placeholder_from_tensor(a0),
        ge.util.make_placeholder_from_dtype_and_shape(dtype=dtypes.float32))
    self.assertEqual(c0.op.inputs[0].op.name, "geph__a0_0")
    self.assertEqual(c0.op.inputs[1].op.name, "geph")
def test_reroute_can_modify(self):
  graph = ops.Graph()
  # create a special graph where "a" is an ambiguous tensor. That is
  # it is both an input and an output of the ops in sgv0.
  with graph.as_default():
    a = constant_op.constant(1.0, shape=[2], name="a")
    b = constant_op.constant(2.0, shape=[2], name="b")
    c = math_ops.add(a, b, name="c")
    d = math_ops.add(a, c, name="d")

    e = constant_op.constant(1.0, shape=[2], name="e")
    f = constant_op.constant(2.0, shape=[2], name="f")
    g = math_ops.add(e, f, name="g")

  sgv0 = ge.sgv(a.op, b.op, c.op)
  sgv1 = ge.sgv(e.op, f.op)

  ge.swap_outputs(sgv0, sgv1)
  self.assertTrue(
      ge.OpMatcher("g").input_ops("a", ge.OpMatcher("c").input_ops("a", "b"))(
          g.op))
  self.assertTrue(ge.OpMatcher("d").input_ops("e", "f")(d.op))
def testTraversesControlInputs(self):
  dt1 = st.StochasticTensor(distributions.Normal(loc=0., scale=1.))
  logits = dt1.value() * 3.
  dt2 = st.StochasticTensor(distributions.Bernoulli(logits=logits))
  dt3 = st.StochasticTensor(distributions.Normal(loc=0., scale=1.))
  x = dt3.value()
  y = array_ops.ones((2, 2)) * 4.
  z = array_ops.ones((2, 2)) * 3.
  out = control_flow_ops.cond(
      math_ops.cast(dt2, dtypes.bool), lambda: math_ops.add(x, y),
      lambda: math_ops.square(z))

  out += 5.
  dep_map = sg._stochastic_dependencies_map([out])
  self.assertEqual(dep_map[dt1], set([out]))
  self.assertEqual(dep_map[dt2], set([out]))
  self.assertEqual(dep_map[dt3], set([out]))
def testIgnoredArguments(self):
  """Tests that JIT computations can ignore formal parameters."""
  with self.test_session() as sess:
    x = array_ops.placeholder(dtypes.int32)
    y = array_ops.placeholder(dtypes.int32)
    with jit_scope():
      z = math_ops.add(x, x)
      w = math_ops.add(y, y)
      # Pulls 'w' into the same compilation via control dependencies.
      with ops.control_dependencies([w]):
        n = control_flow_ops.no_op()
      with ops.control_dependencies([n]):
        t = math_ops.add(z, z)

    run_metadata = config_pb2.RunMetadata()
    out = sess.run(t, {x: np.int32(7), y: np.int32(404)},
                   run_metadata=run_metadata,
                   options=config_pb2.RunOptions(
                       trace_level=config_pb2.RunOptions.FULL_TRACE))

    self.assert_(MetadataHasXlaLaunch(run_metadata))
    self.assertAllClose(28, out)
def tfadd_with_ckpt_saver(out_dir):
  x = array_ops.placeholder(dtypes.int32, name='x_hold')
  y = variables.Variable(constant_op.constant([0]), name='y_saved')
  math_ops.add(x, y, name='x_y_sum')

  init_op = variables.initialize_all_variables()
  saver = saver_lib.Saver(name='abcprefix', write_version=saver_pb2.SaverDef.V1)
  with session.Session() as sess:
    sess.run(init_op)
    sess.run(y.assign(y + 42))
    # Without the checkpoint, the variable won't be set to 42.
    ckpt_file = '%s/test_graph_tfadd_with_ckpt_saver.ckpt' % out_dir
    saver.save(sess, ckpt_file)
    # Without the SaverDef, the restore op won't be named correctly.
    saver_file = '%s/test_graph_tfadd_with_ckpt_saver.saver' % out_dir
    with open(saver_file, 'w') as f:
      f.write(saver.as_saver_def().SerializeToString())
def random_normal(shape,
                  mean=0.0,
                  stddev=1.0,
                  dtype=dtypes.float32,
                  seed=None,
                  name=None):
  """Outputs random values from a normal distribution.

  Args:
    shape: A 1-D integer Tensor or Python array. The shape of the output
      tensor.
    mean: A 0-D Tensor or Python value of type `dtype`. The mean of the normal
      distribution.
    stddev: A 0-D Tensor or Python value of type `dtype`. The standard
      deviation of the normal distribution.
    dtype: The type of the output.
    seed: A Python integer. Used to create a random seed for the distribution.
      See @{tf.set_random_seed} for behavior.
    name: A name for the operation (optional).

  Returns:
    A tensor of the specified shape filled with random normal values.
  """
  with ops.name_scope(name, "random_normal", [shape, mean, stddev]) as name:
    shape_tensor = _ShapeTensor(shape)
    mean_tensor = ops.convert_to_tensor(mean, dtype=dtype, name="mean")
    stddev_tensor = ops.convert_to_tensor(stddev, dtype=dtype, name="stddev")
    seed1, seed2 = random_seed.get_seed(seed)
    rnd = gen_random_ops._random_standard_normal(
        shape_tensor, dtype, seed=seed1, seed2=seed2)
    mul = rnd * stddev_tensor
    value = math_ops.add(mul, mean_tensor, name=name)
    return value
def truncated_normal(shape,
                     mean=0.0,
                     stddev=1.0,
                     dtype=dtypes.float32,
                     seed=None,
                     name=None):
  """Outputs random values from a truncated normal distribution.

  The generated values follow a normal distribution with specified mean and
  standard deviation, except that values whose magnitude is more than 2
  standard deviations from the mean are dropped and re-picked.

  Args:
    shape: A 1-D integer Tensor or Python array. The shape of the output
      tensor.
    mean: A 0-D Tensor or Python value of type `dtype`. The mean of the
      truncated normal distribution.
    stddev: A 0-D Tensor or Python value of type `dtype`. The standard
      deviation of the truncated normal distribution.
    dtype: The type of the output.
    seed: A Python integer. Used to create a random seed for the distribution.
      See @{tf.set_random_seed} for behavior.
    name: A name for the operation (optional).

  Returns:
    A tensor of the specified shape filled with random truncated normal values.
  """
  with ops.name_scope(name, "truncated_normal", [shape, mean, stddev]) as name:
    shape_tensor = _ShapeTensor(shape)
    mean_tensor = ops.convert_to_tensor(mean, dtype=dtype, name="mean")
    stddev_tensor = ops.convert_to_tensor(stddev, dtype=dtype, name="stddev")
    seed1, seed2 = random_seed.get_seed(seed)
    rnd = gen_random_ops._truncated_normal(
        shape_tensor, dtype, seed=seed1, seed2=seed2)
    mul = rnd * stddev_tensor
    value = math_ops.add(mul, mean_tensor, name=name)
    return value
def dropout(inputs,
            keep_prob=0.5,
            noise_shape=None,
            is_training=True,
            outputs_collections=None,
            scope=None):
  """Returns a dropout op applied to the input.

  With probability `keep_prob`, outputs the input element scaled up by
  `1 / keep_prob`, otherwise outputs `0`. The scaling is so that the expected
  sum is unchanged.

  Args:
    inputs: the tensor to pass to the nn.dropout op.
    keep_prob: A scalar `Tensor` with the same type as x. The probability
      that each element is kept.
    noise_shape: A 1-D `Tensor` of type `int32`, representing the
      shape for randomly generated keep/drop flags.
    is_training: A bool `Tensor` indicating whether or not the model
      is in training mode. If so, dropout is applied and values scaled.
      Otherwise, inputs is returned.
    outputs_collections: collection to add the outputs.
    scope: Optional scope for name_scope.

  Returns:
    a tensor representing the output of the operation.
  """
  with ops.name_scope(scope, 'Dropout', [inputs]) as sc:
    inputs = ops.convert_to_tensor(inputs)
    dropout_fn = lambda: nn.dropout(inputs, keep_prob, noise_shape)
    id_fn = lambda: inputs
    outputs = utils.smart_cond(is_training, dropout_fn, id_fn)
    return utils.collect_named_outputs(outputs_collections, sc, outputs)
def flatten(inputs, outputs_collections=None, scope=None):
  """Flattens the input while maintaining the batch_size.

  Assumes that the first dimension represents the batch.

  Args:
    inputs: a tensor of size [batch_size, ...].
    outputs_collections: collection to add the outputs.
    scope: Optional scope for name_scope.

  Returns:
    a flattened tensor with shape [batch_size, k].
  Raises:
    ValueError: if inputs.shape is wrong.
  """
  with ops.name_scope(scope, 'Flatten', [inputs]) as sc:
    inputs = ops.convert_to_tensor(inputs)
    inputs_shape = inputs.get_shape()
    inputs_rank = inputs_shape.ndims
    if (inputs_rank is None) or (inputs_rank < 2):
      raise ValueError('Inputs must have at least 2 dimensions.')
    dims = inputs_shape[1:]
    if not dims.is_fully_defined():
      raise ValueError('Inputs 2nd dimension must be defined.')
    k = dims.num_elements()
    outputs = array_ops.reshape(inputs, [-1, k])
    return utils.collect_named_outputs(outputs_collections, sc, outputs)
def one_hot_encoding(labels,
                     num_classes,
                     on_value=1.0,
                     off_value=0.0,
                     outputs_collections=None,
                     scope=None):
  """Transform numeric labels into onehot_labels using tf.one_hot.

  Args:
    labels: [batch_size] target labels.
    num_classes: total number of classes.
    on_value: A scalar defining the on-value.
    off_value: A scalar defining the off-value.
    outputs_collections: collection to add the outputs.
    scope: Optional scope for name_scope.

  Returns:
    one hot encoding of the labels.
  """
  with ops.name_scope(scope, 'OneHotEncoding', [labels, num_classes]) as sc:
    labels = ops.convert_to_tensor(labels)
    if labels.dtype == dtypes.int32:
      labels = standard_ops.to_int64(labels)
    outputs = standard_ops.one_hot(labels,
                                   num_classes,
                                   on_value=on_value,
                                   off_value=off_value)
    return utils.collect_named_outputs(outputs_collections, sc, outputs)
def unit_norm(inputs, dim, epsilon=1e-7, scope=None):
  """Normalizes the given input across the specified dimension to unit length.

  Note that the rank of `input` must be known.

  Args:
    inputs: A `Tensor` of arbitrary size.
    dim: The dimension along which the input is normalized.
    epsilon: A small value to add to the inputs to avoid dividing by zero.
    scope: Optional scope for variable_scope.

  Returns:
    The normalized `Tensor`.

  Raises:
    ValueError: If dim is smaller than the number of dimensions in 'inputs'.
  """
  with variable_scope.variable_scope(scope, 'UnitNorm', [inputs]):
    if not inputs.get_shape():
      raise ValueError('The input rank must be known.')
    input_rank = len(inputs.get_shape().as_list())
    if dim < 0 or dim >= input_rank:
      raise ValueError(
          'dim must be positive but smaller than the input rank.')

    lengths = math_ops.sqrt(epsilon + math_ops.reduce_sum(
        math_ops.square(inputs), dim, True))
    multiples = []
    if dim > 0:
      multiples.append(array_ops.ones([dim], dtypes.int32))
    multiples.append(array_ops.slice(array_ops.shape(inputs), [dim], [1]))
    if dim < (input_rank - 1):
      multiples.append(array_ops.ones([input_rank - 1 - dim], dtypes.int32))
    multiples = array_ops.concat(0, multiples)
    return math_ops.div(inputs, array_ops.tile(lengths, multiples))
def dropout(inputs,
            keep_prob=0.5,
            noise_shape=None,
            is_training=True,
            outputs_collections=None,
            scope=None):
  """Returns a dropout op applied to the input.

  With probability `keep_prob`, outputs the input element scaled up by
  `1 / keep_prob`, otherwise outputs `0`. The scaling is so that the expected
  sum is unchanged.

  Args:
    inputs: the tensor to pass to the nn.dropout op.
    keep_prob: A scalar `Tensor` with the same type as x. The probability
      that each element is kept.
    noise_shape: A 1-D `Tensor` of type `int32`, representing the
      shape for randomly generated keep/drop flags.
    is_training: A bool `Tensor` indicating whether or not the model
      is in training mode. If so, dropout is applied and values scaled.
      Otherwise, inputs is returned.
    outputs_collections: collection to add the outputs.
    scope: Optional scope for name_scope.

  Returns:
    a tensor representing the output of the operation.
  """
  with ops.name_scope(scope, 'Dropout', [inputs]) as sc:
    inputs = ops.convert_to_tensor(inputs)
    dropout_fn = lambda: nn.dropout(inputs, keep_prob, noise_shape)
    id_fn = lambda: array_ops.identity(inputs)
    outputs = utils.smart_cond(is_training, dropout_fn, id_fn)
    return utils.collect_named_outputs(outputs_collections, sc, outputs)
def one_hot_encoding(labels,
                     num_classes,
                     on_value=1.0,
                     off_value=0.0,
                     outputs_collections=None,
                     scope=None):
  """Transform numeric labels into onehot_labels using `tf.one_hot`.

  Args:
    labels: [batch_size] target labels.
    num_classes: total number of classes.
    on_value: A scalar defining the on-value.
    off_value: A scalar defining the off-value.
    outputs_collections: collection to add the outputs.
    scope: Optional scope for name_scope.

  Returns:
    one hot encoding of the labels.
  """
  with ops.name_scope(scope, 'OneHotEncoding', [labels, num_classes]) as sc:
    labels = ops.convert_to_tensor(labels)
    if labels.dtype == dtypes.int32:
      labels = standard_ops.to_int64(labels)
    outputs = standard_ops.one_hot(labels,
                                   num_classes,
                                   on_value=on_value,
                                   off_value=off_value)
    return utils.collect_named_outputs(outputs_collections, sc, outputs)
def _select_class_id(ids, selected_id):
  """Filter all but `selected_id` out of `ids`.

  Args:
    ids: `int64` `Tensor` or `SparseTensor` of IDs.
    selected_id: Int id to select.

  Returns:
    `SparseTensor` of same dimensions as `ids`. This contains only the entries
    equal to `selected_id`.
  """
  if isinstance(
      ids, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
    return sparse_ops.sparse_retain(
        ids, math_ops.equal(ids.values, selected_id))

  # TODO(ptucker): Make this more efficient, maybe add a sparse version of
  # tf.equal and tf.reduce_any?

  # Shape of filled IDs is the same as `ids` with the last dim collapsed to 1.
  ids_shape = array_ops.shape(ids, out_type=dtypes.int64)
  ids_last_dim = array_ops.size(ids_shape) - 1
  filled_selected_id_shape = math_ops.reduced_shape(
      ids_shape, array_ops.reshape(ids_last_dim, [1]))

  # Intersect `ids` with the selected ID.
  filled_selected_id = array_ops.fill(
      filled_selected_id_shape, math_ops.to_int64(selected_id))
  result = set_ops.set_intersection(filled_selected_id, ids)
  return sparse_tensor.SparseTensor(
      indices=result.indices, values=result.values, shape=ids_shape)
def __add__(self, other):
  return add(self, other)
def __radd__(self, other):
  return add(other, self)
def normalize_moments(counts, mean_ss, variance_ss, shift, name=None):
  """Calculate the mean and variance based on the sufficient statistics.

  Args:
    counts: A `Tensor` containing the total count of the data (one value).
    mean_ss: A `Tensor` containing the mean sufficient statistics: the
      (possibly shifted) sum of the elements to average over.
    variance_ss: A `Tensor` containing the variance sufficient statistics: the
      (possibly shifted) squared sum of the data to compute the variance over.
    shift: A `Tensor` containing the value by which the data is shifted for
      numerical stability, or `None` if no shift was performed.
    name: Name used to scope the operations that compute the moments.

  Returns:
    Two `Tensor` objects: `mean` and `variance`.
  """
  with tf.variable_scope(name, "normalize",
                         [counts, mean_ss, variance_ss, shift]):
    divisor = math_ops.reciprocal(counts, name="divisor")
    if shift is not None:
      shifted_mean = math_ops.multiply(mean_ss, divisor, name="shifted_mean")
      mean = math_ops.add(shifted_mean, shift, name="mean")
    else:  # no shift.
      shifted_mean = math_ops.multiply(mean_ss, divisor, name="mean")
      mean = shifted_mean
    variance = math_ops.subtract(math_ops.multiply(variance_ss, divisor),
                                 math_ops.square(shifted_mean),
                                 name="variance")
  return (mean, variance)
def setUp(self):
  self.graph = ops_lib.Graph()
  with self.graph.as_default():
    self.a = constant_op.constant([1., 1.], shape=[2], name="a")
    with ops_lib.name_scope("foo"):
      self.b = constant_op.constant([2., 2.], shape=[2], name="b")
      self.c = math_ops.add(self.a, self.b, name="c")
      self.d = constant_op.constant([3., 3.], shape=[2], name="d")
      with ops_lib.name_scope("bar"):
        self.e = math_ops.add(self.c, self.d, name="e")
        self.f = math_ops.add(self.c, self.d, name="f")
        self.g = math_ops.add(self.c, self.a, name="g")
        with ops_lib.control_dependencies([self.c.op]):
          self.h = math_ops.add(self.f, self.g, name="h")
def test_compute_boundary_ts_2(self):
  """Test for ge.compute_boundary_ts."""
  graph = ops_lib.Graph()
  with graph.as_default():
    a = constant_op.constant(1, name="a")
    b = constant_op.constant(1, name="b")
    c = math_ops.add(a, b, name="c")
    _ = a + c
  input_ts, output_ts, inside_ts = ge.compute_boundary_ts([a.op, c.op])
  self.assertEqual(list(input_ts), [b])
  self.assertEqual(list(output_ts), [a, c])
  self.assertEqual(list(inside_ts), [a])
def setUp(self):
  self.graph = ops.Graph()
  with self.graph.as_default():
    c0 = constant_op.constant(1.0, shape=[10], name="Const")
    c1 = constant_op.constant(1.0, shape=[10], name="Const")
    c2 = constant_op.constant(1.0, shape=[10], name="Const")
    i = constant_op.constant(1.0, shape=[10], name="Input")
    self.o = math_ops.add(c2, math_ops.add(c1, math_ops.add(c0, i)))
def test_copy_assert(self):
  ops.reset_default_graph()
  a = constant_op.constant(1)
  b = constant_op.constant(1)
  eq = math_ops.equal(a, b)
  assert_op = control_flow_ops.Assert(eq, [a, b])
  with ops.control_dependencies([assert_op]):
    _ = math_ops.add(a, b)
  sgv = ge.make_view([assert_op, eq.op, a.op, b.op])
  copier = ge.Transformer()
  _, info = copier(sgv, sgv.graph, "", "")
  new_assert_op = info.transformed(assert_op)
  self.assertIsNotNone(new_assert_op)
def test_transform(self):
  transformer = ge.Transformer()

  def my_transform_op_handler(info, op):
    add_noise = op.name.startswith("Add")
    op_, op_outputs_ = ge.transform.copy_op_handler(info, op)
    if not add_noise:
      return op_, op_outputs_
    # add some noise to op
    with info.graph_.as_default():
      t_ = math_ops.add(
          constant_op.constant(1.0, shape=[10], name="Noise"),
          op_.outputs[0],
          name="AddNoise")
    # return the "noisy" op
    return op_, [t_]

  transformer.transform_op_handler = my_transform_op_handler

  graph = ops.Graph()
  transformer(self.graph, graph, "", "")
  matcher0 = ge.OpMatcher("AddNoise").input_ops(
      "Noise", ge.OpMatcher("Add").input_ops("Const", "Input"))
  matcher1 = ge.OpMatcher("AddNoise_1").input_ops(
      "Noise_1", ge.OpMatcher("Add_1").input_ops("Const_1", matcher0))
  matcher2 = ge.OpMatcher("AddNoise_2").input_ops(
      "Noise_2", ge.OpMatcher("Add_2").input_ops("Const_2", matcher1))
  top = ge.select_ops("^AddNoise_2$", graph=graph)[0]
  self.assertTrue(matcher2(top))
def test_connect(self):
  """Test for ge.connect."""
  with self.graph.as_default():
    x = constant_op.constant([1., 1.], shape=[2], name="x")
    y = constant_op.constant([2., 2.], shape=[2], name="y")
    z = math_ops.add(x, y, name="z")

  sgv = ge.sgv(x.op, y.op, z.op)
  ge.connect(sgv, ge.sgv(self.e.op).remap_inputs([0]))
  self.assertTrue(
      ge.OpMatcher("^foo/bar/e$").input_ops("^z$", "foo/d$")(self.e.op))
def test_make_list_of_t(self):
  """Test for ge.util.make_list_of_t."""
  g0 = ops.Graph()
  with g0.as_default():
    a0 = constant_op.constant(1)
    b0 = constant_op.constant(2)
    c0 = math_ops.add(a0, b0)  # pylint: disable=unused-variable
  # Should extract the tensors from the graph.
  self.assertEqual(len(ge.util.make_list_of_t(g0)), 3)
  # Should extract the tensors from the tuple.
  self.assertEqual(len(ge.util.make_list_of_t((a0, b0))), 2)
  # Should extract the tensors and ignore the ops.
  self.assertEqual(
      len(ge.util.make_list_of_t(
          (a0, a0.op, b0), ignore_ops=True)), 2)
def test_get_generating_consuming(self):
  """Test for ge.util.get_generating_ops and ge.util.get_consuming_ops."""
  g0 = ops.Graph()
  with g0.as_default():
    a0 = constant_op.constant(1)
    b0 = constant_op.constant(2)
    c0 = math_ops.add(a0, b0)
  self.assertEqual(len(ge.util.get_generating_ops([a0, b0])), 2)
  self.assertEqual(len(ge.util.get_consuming_ops([a0, b0])), 1)
  self.assertEqual(len(ge.util.get_generating_ops([c0])), 1)
  self.assertEqual(ge.util.get_consuming_ops([c0]), [])
def test_control_outputs(self):
  """Test for the ge.util.ControlOutputs class."""
  g0 = ops.Graph()
  with g0.as_default():
    a0 = constant_op.constant(1)
    b0 = constant_op.constant(2)
    x0 = constant_op.constant(3)
    with ops.control_dependencies([x0.op]):
      c0 = math_ops.add(a0, b0)  # pylint: disable=unused-variable
  control_outputs = ge.util.ControlOutputs(g0).get_all()
  self.assertEqual(len(control_outputs), 1)
  self.assertEqual(len(control_outputs[x0.op]), 1)
  self.assertIs(list(control_outputs[x0.op])[0], c0.op)
def setUp(self):
  self.graph = ops.Graph()
  with self.graph.as_default():
    self.a = constant_op.constant([1., 1.], shape=[2], name="a")
    with ops.name_scope("foo"):
      self.b = constant_op.constant([2., 2.], shape=[2], name="b")
      self.c = math_ops.add(self.a, self.b, name="c")
      self.d = constant_op.constant([3., 3.], shape=[2], name="d")
      with ops.name_scope("bar"):
        self.e = math_ops.add(self.c, self.d, name="e")
        self.f = math_ops.add(self.c, self.d, name="f")
        self.g = math_ops.add(self.c, self.a, name="g")
        with ops.control_dependencies([self.c.op]):
          self.h = math_ops.add(self.f, self.g, name="h")
def setUp(self):
  self.graph = ops.Graph()
  with self.graph.as_default():
    self.a0 = constant_op.constant(1.0, shape=[2], name="a0")
    self.b0 = constant_op.constant(2.0, shape=[2], name="b0")
    self.c0 = math_ops.add(self.a0, self.b0, name="c0")
    self.a1 = constant_op.constant(3.0, shape=[2], name="a1")
    self.b1 = constant_op.constant(4.0, shape=[2], name="b1")
    self.c1 = math_ops.add(self.a1, self.b1, name="c1")
    self.a2 = constant_op.constant(3.0, shape=[3], name="a2")
    self.b2 = constant_op.constant(4.0, shape=[3], name="b2")
    self.c2 = math_ops.add(self.a2, self.b2, name="c2")
def testPathwiseDerivativeDoesNotAddSurrogateLosses(self):
  with self.test_session():
    mu = [0.0, 0.1, 0.2]
    sigma = constant_op.constant([1.1, 1.2, 1.3])
    with st.value_type(st.SampleValue()):
      prior = st.StochasticTensor(distributions.Normal(loc=mu, scale=sigma))
      likelihood = st.StochasticTensor(
          distributions.Normal(
              loc=prior, scale=sigma))
      self.assertEqual(
          prior.distribution.reparameterization_type,
          distributions.FULLY_REPARAMETERIZED)
      self.assertEqual(
          likelihood.distribution.reparameterization_type,
          distributions.FULLY_REPARAMETERIZED)

    loss = math_ops.square(array_ops.identity(likelihood) - [0.0, 0.1, 0.2])
    sum_loss = math_ops.reduce_sum(loss)
    surrogate_loss = sg.surrogate_loss([loss])
    with self.assertRaisesRegexp(ValueError, "dimensionality 1 or greater"):
      _ = sg.surrogate_loss([sum_loss])
    surrogate_from_both = sg.surrogate_loss(
        [loss, sum_loss * array_ops.ones_like(loss)])

    # Pathwise derivative terms do not require add'l surrogate loss terms.
    with self.test_session() as sess:
      self.assertAllClose(*sess.run([loss, surrogate_loss]))
      self.assertAllClose(*sess.run([(loss + sum_loss), surrogate_from_both]))
def _select_class_id(ids, selected_id):
  """Filter all but `selected_id` out of `ids`.

  Args:
    ids: `int64` `Tensor` or `SparseTensor` of IDs.
    selected_id: Int id to select.

  Returns:
    `SparseTensor` of same dimensions as `ids`. This contains only the entries
    equal to `selected_id`.
  """
  if isinstance(
      ids, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
    return sparse_ops.sparse_retain(
        ids, math_ops.equal(ids.values, selected_id))

  # TODO(ptucker): Make this more efficient, maybe add a sparse version of
  # tf.equal and tf.reduce_any?

  # Shape of filled IDs is the same as `ids` with the last dim collapsed to 1.
  ids_shape = array_ops.shape(ids, out_type=dtypes.int64)
  ids_last_dim = array_ops.size(ids_shape) - 1
  filled_selected_id_shape = math_ops.reduced_shape(
      ids_shape, array_ops.reshape(ids_last_dim, [1]))

  # Intersect `ids` with the selected ID.
  filled_selected_id = array_ops.fill(
      filled_selected_id_shape, math_ops.to_int64(selected_id))
  result = set_ops.set_intersection(filled_selected_id, ids)
  return sparse_tensor.SparseTensor(
      indices=result.indices, values=result.values, dense_shape=ids_shape)
def setUp(self):
  super(CoreBinaryOpsTest, self).setUp()

  self.x_probs_broadcast_tensor = array_ops.reshape(
      self.x_probs_lt.tensor, [self.x_size, 1, self.probs_size])

  self.channel_probs_broadcast_tensor = array_ops.reshape(
      self.channel_probs_lt.tensor, [1, self.channel_size, self.probs_size])

  # == and != are not element-wise for tf.Tensor, so they shouldn't be
  # elementwise for LabeledTensor, either.
  self.ops = [
      ('add', operator.add, math_ops.add, core.add),
      ('sub', operator.sub, math_ops.subtract, core.sub),
      ('mul', operator.mul, math_ops.multiply, core.mul),
      ('div', operator.truediv, math_ops.div, core.div),
      ('mod', operator.mod, math_ops.mod, core.mod),
      ('pow', operator.pow, math_ops.pow, core.pow_function),
      ('equal', None, math_ops.equal, core.equal),
      ('less', operator.lt, math_ops.less, core.less),
      ('less_equal', operator.le, math_ops.less_equal, core.less_equal),
      ('not_equal', None, math_ops.not_equal, core.not_equal),
      ('greater', operator.gt, math_ops.greater, core.greater),
      ('greater_equal', operator.ge, math_ops.greater_equal,
       core.greater_equal),
  ]
  self.test_lt_1 = self.x_probs_lt
  self.test_lt_2 = self.channel_probs_lt
  self.test_lt_1_broadcast = self.x_probs_broadcast_tensor
  self.test_lt_2_broadcast = self.channel_probs_broadcast_tensor
  self.broadcast_axes = [self.a0, self.a1, self.a3]
def test_unconstrained(self):

  def objective(x):
    """Rosenbrock function. (Carl Edward Rasmussen, 2001-07-21).

    f(x) = sum_{i=1:D-1} 100*(x(i+1) - x(i)^2)^2 + (1-x(i))^2

    Args:
      x: a Variable
    Returns:
      f: a tensor (objective value)
    """
    d = array_ops.size(x)
    s = math_ops.add(
        100 * math_ops.square(
            math_ops.subtract(
                array_ops.strided_slice(x, [1], [d]),
                math_ops.square(array_ops.strided_slice(x, [0], [d - 1])))),
        math_ops.square(
            math_ops.subtract(1.0, array_ops.strided_slice(x, [0], [d - 1]))))
    return math_ops.reduce_sum(s)

  dimension = 5
  x = variables.Variable(array_ops.zeros(dimension))
  optimizer = external_optimizer.ScipyOptimizerInterface(objective(x))

  with self.test_session() as sess:
    sess.run(variables.global_variables_initializer())
    optimizer.minimize(sess)

    self.assertAllClose(np.ones(dimension), sess.run(x))
def testAdd(self):
  val1 = np.array([4, 3, 2, 1], dtype=np.float32)
  val2 = np.array([5, 6, 7, 8], dtype=np.float32)
  expected = val1 + val2
  with self.test_session():
    with self.test_scope():
      input1 = constant_op.constant(val1, name="const1")
      input2 = constant_op.constant(val2, name="const2")
      output = math_ops.add(input1, input2)
    result = output.eval()
  self.assertAllClose(result, expected, rtol=1e-3)
def testCond(self):
  """Tests that tf.cond works on XLA devices."""
  with session_lib.Session() as session:
    x = array_ops.placeholder(dtypes.float32)
    y = array_ops.placeholder(dtypes.float32)
    c = array_ops.placeholder(dtypes.bool)
    with ops.device("device:XLA_CPU:0"):
      z = x + 1.0
      w = control_flow_ops.cond(c, lambda: z, lambda: y)
      t = math_ops.add(z, w)

    result = session.run(t, {x: np.float32(2), y: np.float32(4), c: True})
    self.assertAllClose(result, np.float32(6), rtol=1e-3)
def testAliasing(self):
  """Regression test for compiled functions that return an aliased buffer.

  XLA returns aliased buffers if outputs are identical. Tests that we handle
  that case.
  """

  def AddOnceReturnTwice(x):
    y = math_ops.add(x, x)
    return y, y

  # Exercises compiling a function (say, Foo) which calls another
  # function (say, Bar) which is not inlined. When the compiler compiles
  # Foo, it needs to symbolically execute Bar correctly regardless of whether
  # Bar is inlined or not.
  #
  # Tests compiled=True and noinline=True.
  self._compare(
      AddOnceReturnTwice, [np.array(
          [[[0.5, -1.0]]], dtype=np.float32)],
      noinline=True)
  # Tests compiled=True and noinline=False.
  self._compare(
      AddOnceReturnTwice, [np.array(
          [[[0.5, -1.0]]], dtype=np.float32)],
      noinline=False)
def testInt32Input(self):
  """Test an int32-typed input.

  On a GPU, int32 tensors will be placed in host memory.
  """

  def AddToSelf(x):
    return math_ops.add(x, x)

  self._compare(AddToSelf, [np.array([7, 1, 3], dtype=np.int32)])
def testExplicitMarking(self):
  """Test explicit marking of operators to compile."""
  batch_size = 16
  image_size = 28 * 28
  num_classes = 10

  with ops.Graph().as_default():
    x = array_ops.placeholder(dtypes.float32)
    w = array_ops.placeholder(dtypes.float32)
    b = array_ops.placeholder(dtypes.float32)
    with jit_scope():
      y1 = math_ops.matmul(x, w)
      y2 = math_ops.add(y1, b)
    with jit_scope():
      y = math_ops.square(y2)

    dw = np.random.random_sample((image_size, num_classes)).astype(np.float32)
    db = np.random.random_sample((num_classes)).astype(np.float32)
    dx = np.random.random_sample((batch_size, image_size)).astype(np.float32)
    with session_lib.Session() as sess:
      run_metadata = config_pb2.RunMetadata()
      output = sess.run(y, {x: dx, w: dw, b: db},
                        run_metadata=run_metadata,
                        options=config_pb2.RunOptions(
                            trace_level=config_pb2.RunOptions.FULL_TRACE))

      # TODO(phawkins): really we would like to test that there were exactly
      # two kernel launches. However, we have no reliable way to determine
      # that.
      self.assert_(MetadataHasXlaLaunch(run_metadata))

      expected = np.square(np.dot(dx, dw) + db)
      self.assertAllClose(expected, output, rtol=1e-1)
def testBroadcasting(self):
  """Tests broadcasting behavior of an operator."""

  for dtype in self.numeric_types:
    self._testBinary(
        math_ops.add,
        np.array(3, dtype=dtype),
        np.array([10, 20], dtype=dtype),
        expected=np.array([13, 23], dtype=dtype))
    self._testBinary(
        math_ops.add,
        np.array([10, 20], dtype=dtype),
        np.array(4, dtype=dtype),
        expected=np.array([14, 24], dtype=dtype))

    # [1,3] x [4,1] => [4,3]
    self._testBinary(
        math_ops.add,
        np.array([[10, 20, 30]], dtype=dtype),
        np.array([[1], [2], [3], [4]], dtype=dtype),
        expected=np.array(
            [[11, 21, 31], [12, 22, 32], [13, 23, 33], [14, 24, 34]],
            dtype=dtype))

    # [3] * [4,1] => [4,3]
    self._testBinary(
        math_ops.add,
        np.array([10, 20, 30], dtype=dtype),
        np.array([[1], [2], [3], [4]], dtype=dtype),
        expected=np.array(
            [[11, 21, 31], [12, 22, 32], [13, 23, 33], [14, 24, 34]],
            dtype=dtype))
def tfadd(_):
  x = constant_op.constant([1], name='x_const')
  y = constant_op.constant([2], name='y_const')
  math_ops.add(x, y, name='x_y_sum')
def tfadd_with_ckpt(out_dir):
  x = array_ops.placeholder(dtypes.int32, name='x_hold')
  y = variables.Variable(constant_op.constant([0]), name='y_saved')
  math_ops.add(x, y, name='x_y_sum')

  init_op = variables.initialize_all_variables()
  saver = saver_lib.Saver(write_version=saver_pb2.SaverDef.V1)
  with session.Session() as sess:
    sess.run(init_op)
    sess.run(y.assign(y + 42))
    # Without the checkpoint, the variable won't be set to 42.
    ckpt = '%s/test_graph_tfadd_with_ckpt.ckpt' % out_dir
    saver.save(sess, ckpt)
def tfmatmulandadd(_):
  # This tests multiple outputs.
  x = array_ops.placeholder(dtypes.float32, name='x_hold')
  y = array_ops.placeholder(dtypes.float32, name='y_hold')
  math_ops.matmul(x, y, name='x_y_prod')
  math_ops.add(x, y, name='x_y_sum')
def random_uniform(shape,
                   minval=0,
                   maxval=None,
                   dtype=dtypes.float32,
                   seed=None,
                   name=None):
  """Outputs random values from a uniform distribution.

  The generated values follow a uniform distribution in the range
  `[minval, maxval)`. The lower bound `minval` is included in the range, while
  the upper bound `maxval` is excluded.

  For floats, the default range is `[0, 1)`. For ints, at least `maxval` must
  be specified explicitly.

  In the integer case, the random integers are slightly biased unless
  `maxval - minval` is an exact power of two. The bias is small for values of
  `maxval - minval` significantly smaller than the range of the output (either
  `2**32` or `2**64`).

  Args:
    shape: A 1-D integer Tensor or Python array. The shape of the output
      tensor.
    minval: A 0-D Tensor or Python value of type `dtype`. The lower bound on
      the range of random values to generate. Defaults to 0.
    maxval: A 0-D Tensor or Python value of type `dtype`. The upper bound on
      the range of random values to generate. Defaults to 1 if `dtype` is
      floating point.
    dtype: The type of the output: `float32`, `float64`, `int32`, or `int64`.
    seed: A Python integer. Used to create a random seed for the distribution.
      See @{tf.set_random_seed} for behavior.
    name: A name for the operation (optional).

  Returns:
    A tensor of the specified shape filled with random uniform values.

  Raises:
    ValueError: If `dtype` is integral and `maxval` is not specified.
  """
  dtype = dtypes.as_dtype(dtype)
  if maxval is None:
    if dtype.is_integer:
      raise ValueError("Must specify maxval for integer dtype %r" % dtype)
    maxval = 1
  with ops.name_scope(name, "random_uniform", [shape, minval, maxval]) as name:
    shape = _ShapeTensor(shape)
    minval = ops.convert_to_tensor(minval, dtype=dtype, name="min")
    maxval = ops.convert_to_tensor(maxval, dtype=dtype, name="max")
    seed1, seed2 = random_seed.get_seed(seed)
    if dtype.is_integer:
      return gen_random_ops._random_uniform_int(
          shape, minval, maxval, seed=seed1, seed2=seed2, name=name)
    else:
      rnd = gen_random_ops._random_uniform(
          shape, dtype, seed=seed1, seed2=seed2)
      return math_ops.add(rnd * (maxval - minval), minval, name=name)
def bias_add(inputs,
             activation_fn=None,
             initializer=init_ops.zeros_initializer,
             regularizer=None,
             reuse=None,
             variables_collections=None,
             outputs_collections=None,
             trainable=True,
             scope=None):
  """Adds a bias to the inputs.

  Can be used as a normalizer function for conv2d and fully_connected.

  Args:
    inputs: a tensor with at least rank 2 and a value for the last dimension,
      e.g. `[batch_size, depth]`, `[None, None, None, depth]`.
    activation_fn: activation function, default set to None to skip it and
      maintain a linear activation.
    initializer: An initializer for the bias, defaults to 0.
    regularizer: A regularizer like the result of
      `l1_regularizer` or `l2_regularizer`.
    reuse: whether or not the layer and its variables should be reused. To be
      able to reuse the layer scope must be given.
    variables_collections: optional collections for the variables.
    outputs_collections: collections to add the outputs.
    trainable: If `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
    scope: Optional scope for variable_scope.

  Returns:
    a tensor representing the result of adding biases to the inputs.
  """
  with variable_scope.variable_scope(scope, 'BiasAdd', [inputs],
                                     reuse=reuse) as sc:
    inputs = ops.convert_to_tensor(inputs)
    dtype = inputs.dtype.base_dtype
    num_features = utils.last_dimension(inputs.get_shape(), min_rank=2)
    biases_collections = utils.get_variable_collections(variables_collections,
                                                        'biases')
    biases = variables.model_variable('biases',
                                      shape=[num_features,],
                                      dtype=dtype,
                                      initializer=initializer,
                                      regularizer=regularizer,
                                      collections=biases_collections,
                                      trainable=trainable)
    outputs = nn.bias_add(inputs, biases)
    if activation_fn is not None:
      outputs = activation_fn(outputs)
    return utils.collect_named_outputs(outputs_collections,
                                       sc.original_name_scope, outputs)
def _auc_convert_hist_to_auc(hist_true_acc, hist_false_acc, nbins):
  """Convert histograms to auc.

  Args:
    hist_true_acc: `Tensor` holding accumulated histogram of scores for records
      that were `True`.
    hist_false_acc: `Tensor` holding accumulated histogram of scores for
      records that were `False`.
    nbins: Integer number of bins in the histograms.

  Returns:
    Scalar `Tensor` estimating AUC.
  """
  # Note that this follows the "Approximating AUC" section in:
  # Efficient AUC learning curve calculation, R. R. Bouckaert,
  # AI'06 Proceedings of the 19th Australian joint conference on Artificial
  # Intelligence: advances in Artificial Intelligence
  # Pages 181-191.
  # Note that the above paper has an error, and we need to re-order our bins to
  # go from high to low score.

  # Normalize histogram so we get fraction in each bin.
  normed_hist_true = math_ops.truediv(hist_true_acc,
                                      math_ops.reduce_sum(hist_true_acc))
  normed_hist_false = math_ops.truediv(hist_false_acc,
                                       math_ops.reduce_sum(hist_false_acc))

  # These become delta x, delta y from the paper.
  delta_y_t = array_ops.reverse(normed_hist_true, [True], name='delta_y_t')
  delta_x_t = array_ops.reverse(normed_hist_false, [True], name='delta_x_t')

  # strict_1d_cumsum requires float32 args.
  delta_y_t = math_ops.cast(delta_y_t, dtypes.float32)
  delta_x_t = math_ops.cast(delta_x_t, dtypes.float32)

  # Trapezoidal integration, \int_0^1 0.5 * (y_t + y_{t-1}) dx_t
  y_t = _strict_1d_cumsum(delta_y_t, nbins)
  first_trap = delta_x_t[0] * y_t[0] / 2.0
  other_traps = delta_x_t[1:] * (y_t[1:] + y_t[:nbins - 1]) / 2.0
  return math_ops.add(first_trap, math_ops.reduce_sum(other_traps), name='auc')


# TODO(langmore) Remove once a faster cumsum (accumulate_sum) Op is available.
# Also see if cast to float32 above can be removed with new cumsum.
# See: https://github.com/tensorflow/tensorflow/issues/813
def dense_to_sparse_tensor(dense_tensor, ignore_value=None):
  """Converts a dense Tensor to a SparseTensor, dropping ignore_value cells.

  Args:
    dense_tensor: A `Tensor`.
    ignore_value: Entries in `dense_tensor` equal to this value will be
      absent from the return `SparseTensor`. If `None`, default value of
      dense_tensor's dtype will be used (e.g. '' for `str`, 0 for `int`).

  Returns:
    A `SparseTensor` with the same shape as `dense_tensor`.

  Raises:
    ValueError: when `dense_tensor`'s rank is `None`.
  """
  with ops.name_scope("DenseToSparseTensor"):
    dense_t = ops.convert_to_tensor(dense_tensor)
    if dense_t.get_shape().ndims is None:
      # TODO(b/32318825): Implement dense_to_sparse_tensor for undefined rank.
      raise ValueError("dense_tensor.get_shape() should be defined, got None.")
    if ignore_value is None:
      if dense_t.dtype == dtypes.string:
        # Exception due to TF strings are converted to numpy objects by default.
        ignore_value = ""
      else:
        ignore_value = dense_t.dtype.as_numpy_dtype()
    dense_shape = math_ops.cast(array_ops.shape(dense_t), dtypes.int64)
    indices = array_ops.where(
        math_ops.not_equal(dense_t,
                           math_ops.cast(ignore_value, dense_t.dtype)))
    index_dims = len(dense_t.get_shape())
    # Flattens the tensor and indices for use with gather.
    flat_tensor = array_ops.reshape(dense_t, [-1])
    flat_indices = indices[:, index_dims - 1]
    # Computes the correct flattened indices for 2d (or higher) tensors.
    if index_dims > 1:
      higher_dims = indices[:, :index_dims - 1]
      shape_multipliers = array_ops.pack(
          _multiplier_helper(array_ops.unpack(dense_shape)[1:]))
      offsets = math_ops.reduce_sum(
          math_ops.mul(higher_dims, shape_multipliers), reduction_indices=[1])
      flat_indices = math_ops.add(flat_indices, offsets)
    values = array_ops.gather(flat_tensor, flat_indices)
    return sparse_tensor.SparseTensor(indices, values, dense_shape)
def _remove_squeezable_dimensions(predictions, labels, weights):
  """Squeeze last dim if needed.

  Squeezes `predictions` and `labels` if their rank differs by 1.
  Squeezes `weights` if its rank is 1 more than the new rank of `predictions`.

  This will use static shape if available. Otherwise, it will add graph
  operations, which could result in a performance hit.

  Args:
    predictions: Predicted values, a `Tensor` of arbitrary dimensions.
    labels: Label values, a `Tensor` whose dimensions match `predictions`.
    weights: optional `weights` tensor. It will be squeezed if its rank is 1
      more than the new rank of `predictions`.

  Returns:
    Tuple of `predictions`, `labels` and `weights`, possibly with the last
    dimension squeezed.
  """
  predictions, labels = tensor_util.remove_squeezable_dimensions(
      predictions, labels)
  predictions.get_shape().assert_is_compatible_with(labels.get_shape())

  if weights is not None:
    predictions_shape = predictions.get_shape()
    predictions_rank = predictions_shape.ndims
    weights_shape = weights.get_shape()
    weights_rank = weights_shape.ndims

    if (predictions_rank is not None) and (weights_rank is not None):
      # Use static rank.
      if weights_rank - predictions_rank == 1:
        weights = array_ops.squeeze(weights, [-1])
    elif (weights_rank is None) or (
        weights_shape.dims[-1].is_compatible_with(1)):
      # Use dynamic rank.
      weights = control_flow_ops.cond(
          math_ops.equal(array_ops.rank(weights),
                         math_ops.add(array_ops.rank(predictions), 1)),
          lambda: array_ops.squeeze(weights, [-1]),
          lambda: weights)
  return predictions, labels, weights