The following 50 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.python.ops.control_flow_ops.cond().
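Before the examples, here is a minimal sketch of the basic pattern they all build on (this assumes TensorFlow 1.x graph mode; the names and values are illustrative only):

import tensorflow as tf
from tensorflow.python.ops import control_flow_ops

x = tf.placeholder(tf.float32, shape=[], name='x')
pred = tf.less(x, 0.0)                      # scalar boolean tensor

# Both branches are zero-argument callables returning tensors of compatible
# structure; only the taken branch is executed at run time.
result = control_flow_ops.cond(pred,
                               lambda: -x,              # taken when x < 0
                               lambda: tf.identity(x))  # taken otherwise

with tf.Session() as sess:
    print(sess.run(result, feed_dict={x: -3.0}))    # 3.0
    print(sess.run(result, feed_dict={x: 2.0}))     # 2.0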
def _step(time, sequence_length, min_sequence_length, max_sequence_length, zero_logit, generate_logit):
    # Step 1: determine whether we need to call_cell or not
    empty_update = lambda: zero_logit
    logit = control_flow_ops.cond(
        time < max_sequence_length, generate_logit, empty_update)

    # Step 2: determine whether we need to copy through state and/or outputs
    existing_logit = lambda: logit

    def copy_through():
        # Use broadcasting select to determine which values should get
        # the previous state & zero output, and which values should get
        # a calculated state & output.
        copy_cond = (time >= sequence_length)
        return math_ops.select(copy_cond, zero_logit, logit)

    logit = control_flow_ops.cond(
        time < min_sequence_length, existing_logit, copy_through)
    # Pin the static shape from zero_logit (the original passed logit's own
    # shape here, which is a no-op).
    logit.set_shape(zero_logit.get_shape())
    return logit
def static_cond(pred, fn1, fn2):
    """Return either fn1() or fn2() based on the boolean value of `pred`.

    Same signature as `control_flow_ops.cond()` but requires pred to be a bool.

    Args:
      pred: A value determining whether to return the result of `fn1` or `fn2`.
      fn1: The callable to be performed if pred is true.
      fn2: The callable to be performed if pred is false.

    Returns:
      Tensors returned by the call to either `fn1` or `fn2`.

    Raises:
      TypeError: if `fn1` or `fn2` is not callable.
    """
    if not callable(fn1):
        raise TypeError('fn1 must be callable.')
    if not callable(fn2):
        raise TypeError('fn2 must be callable.')
    if pred:
        return fn1()
    else:
        return fn2()
def smart_cond(pred, fn1, fn2, name=None):
    """Return either fn1() or fn2() based on the boolean predicate/value `pred`.

    If `pred` is a bool or has a constant value, `static_cond` is used;
    otherwise `tf.cond` is used.

    Args:
      pred: A scalar determining whether to return the result of `fn1` or `fn2`.
      fn1: The callable to be performed if pred is true.
      fn2: The callable to be performed if pred is false.
      name: Optional name prefix when using tf.cond.

    Returns:
      Tensors returned by the call to either `fn1` or `fn2`.
    """
    pred_value = constant_value(pred)
    if pred_value is not None:
        # Use static_cond if pred has a constant value.
        return static_cond(pred_value, fn1, fn2)
    else:
        # Use dynamic cond otherwise.
        return control_flow_ops.cond(pred, fn1, fn2, name)
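To make the dispatch concrete, here is a small hedged sketch of how a constant predicate differs from a dynamic one (it assumes `constant_value` above is `tensorflow.python.framework.tensor_util.constant_value`, which is how this TF utility is commonly wired; illustrative only):

import tensorflow as tf
from tensorflow.python.framework import tensor_util

const_pred = tf.constant(True)
print(tensor_util.constant_value(const_pred))    # True  -> smart_cond takes the static_cond path

dynamic_pred = tf.placeholder(tf.bool, shape=[])
print(tensor_util.constant_value(dynamic_pred))  # None  -> smart_cond falls back to tf.cond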
def average_impurity(self):
    """Constructs a TF graph for evaluating the average leaf impurity of a tree.

    If in regression mode, this is the leaf variance. If in classification mode,
    this is the gini impurity.

    Returns:
      The last op in the graph.
    """
    children = array_ops.squeeze(array_ops.slice(
        self.variables.tree, [0, 0], [-1, 1]), squeeze_dims=[1])
    is_leaf = math_ops.equal(constants.LEAF_NODE, children)
    leaves = math_ops.to_int32(array_ops.squeeze(array_ops.where(is_leaf),
                                                 squeeze_dims=[1]))
    counts = array_ops.gather(self.variables.node_sums, leaves)
    gini = self._weighted_gini(counts)

    # Guard against step 1, when there often are no leaves yet.
    def impurity():
        return gini

    # Since average impurity can be used for loss, when there's no data just
    # return a big number so that loss always decreases.
    def big():
        return array_ops.ones_like(gini, dtype=dtypes.float32) * 10000000.

    return control_flow_ops.cond(math_ops.greater(
        array_ops.shape(leaves)[0], 0), impurity, big)
def _assert(cond, ex_type, msg):
    """A polymorphic assert that works with tensors and boolean expressions.

    If `cond` is not a tensor, behave like an ordinary assert statement, except
    that an empty list is returned. If `cond` is a tensor, return a list
    containing a single TensorFlow assert op.

    Args:
      cond: Something that evaluates to a boolean value. May be a tensor.
      ex_type: The exception class to use.
      msg: The error message.

    Returns:
      A list, containing at most one assert op.
    """
    if _is_tensor(cond):
        return [control_flow_ops.Assert(cond, [msg])]
    else:
        if not cond:
            raise ex_type(msg)
        else:
            return []
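A hedged usage sketch for the helper above (TF 1.x graph mode assumed; the helper relies on the surrounding module's private `_is_tensor` and `control_flow_ops` imports, so this is illustrative rather than standalone):

import tensorflow as tf

# Plain Python bool: checked immediately, an empty list comes back.
no_ops = _assert(2 > 1, ValueError, 'never triggers')   # -> []

# Tensor predicate: a one-element list with an Assert op, usable as a control dependency.
x = tf.placeholder(tf.float32, shape=[])
checks = _assert(tf.greater(x, 0.0), ValueError, 'x must be positive')
with tf.control_dependencies(checks):
    y = tf.identity(x)   # running y also evaluates the assertion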
def _safe_scalar_div(numerator, denominator, name):
    """Divides two values, returning 0 if the denominator is 0.

    Args:
      numerator: A scalar `float64` `Tensor`.
      denominator: A scalar `float64` `Tensor`.
      name: Name for the returned op.

    Returns:
      0 if `denominator` == 0, else `numerator` / `denominator`.
    """
    numerator.get_shape().with_rank_at_most(1)
    denominator.get_shape().with_rank_at_most(1)
    return control_flow_ops.cond(
        math_ops.equal(
            array_ops.constant(0.0, dtype=dtypes.float64), denominator),
        lambda: array_ops.constant(0.0, dtype=dtypes.float64),
        lambda: math_ops.div(numerator, denominator),
        name=name)
def _get_loss(self, features, labels, data_spec=None):
    """Constructs, caches, and returns the inference-based loss."""
    if self._loss is not None:
        return self._loss

    def _average_loss():
        probs = self.inference_graph(features, data_spec=data_spec)
        return math_ops.reduce_sum(self.loss_fn(
            probs, labels)) / math_ops.to_float(
                array_ops.shape(features)[0])

    self._loss = control_flow_ops.cond(
        self.average_size() > 0, _average_loss,
        lambda: constant_op.constant(sys.maxsize, dtype=dtypes.float32))
    return self._loss
def _assert(cond, ex_type, msg):
    """A polymorphic assert that works with tensors and boolean expressions.

    If `cond` is not a tensor, behave like an ordinary assert statement, except
    that an empty list is returned. If `cond` is a tensor, return a list
    containing a single TensorFlow assert op.

    Args:
      cond: Something that evaluates to a boolean value. May be a tensor.
      ex_type: The exception class to use.
      msg: The error message.

    Returns:
      A list, containing at most one assert op.
    """
    if is_tensor(cond):
        return [logging_ops.Assert(cond, [msg])]
    else:
        if not cond:
            raise ex_type(msg)
        else:
            return []
def _resize_aux(image, new_shorter_edge_tensor):
    shape = tf.shape(image)
    height = shape[0]
    width = shape[1]
    height_smaller_than_width = tf.less_equal(height, width)
    new_height_and_width = cf.cond(
        height_smaller_than_width,
        lambda: (new_shorter_edge_tensor,
                 _compute_longer_edge(height, width, new_shorter_edge_tensor)),
        lambda: (_compute_longer_edge(width, height, new_shorter_edge_tensor),
                 new_shorter_edge_tensor)
    )
    # workaround since tf.image.resize_images() does not work
    image = tf.expand_dims(image, 0)
    image = tf.image.resize_bilinear(image, tf.pack(new_height_and_width))
    return tf.squeeze(image, [0])
def random_flip_left_right(image, bboxes, seed=None):
    """Random flip left-right of an image and its bounding boxes."""
    def flip_bboxes(bboxes):
        """Flip bounding boxes coordinates."""
        bboxes = tf.stack([bboxes[:, 0], 1 - bboxes[:, 3],
                           bboxes[:, 2], 1 - bboxes[:, 1]], axis=-1)
        return bboxes

    # Random flip. Tensorflow implementation.
    with tf.name_scope('random_flip_left_right'):
        image = ops.convert_to_tensor(image, name='image')
        _Check3DImage(image, require_static=False)
        uniform_random = random_ops.random_uniform([], 0, 1.0, seed=seed)
        mirror_cond = math_ops.less(uniform_random, .5)
        # Flip image.
        result = control_flow_ops.cond(mirror_cond,
                                       lambda: array_ops.reverse_v2(image, [1]),
                                       lambda: image)
        # Flip bboxes.
        bboxes = control_flow_ops.cond(mirror_cond,
                                       lambda: flip_bboxes(bboxes),
                                       lambda: bboxes)
        return fix_image_flip_shape(image, result), bboxes
def _argmax_or_mcsearch(embedding, output_projection=None, update_embedding=True, mc_search=False):
    def loop_function(prev, _):
        if output_projection is not None:
            prev = nn_ops.xw_plus_b(prev, output_projection[0], output_projection[1])

        if isinstance(mc_search, bool):
            # Monte Carlo search: sample the next symbol with tf.multinomial and
            # flatten to shape [-1]; otherwise take the argmax of the logits.
            prev_symbol = tf.reshape(tf.multinomial(prev, 1), [-1]) if mc_search else math_ops.argmax(prev, 1)
        else:
            prev_symbol = tf.cond(mc_search,
                                  lambda: tf.reshape(tf.multinomial(prev, 1), [-1]),
                                  lambda: tf.argmax(prev, 1))

        emb_prev = embedding_ops.embedding_lookup(embedding, prev_symbol)
        # Optionally stop gradients from flowing back through the embedding lookup.
        if not update_embedding:
            emb_prev = array_ops.stop_gradient(emb_prev)
        return emb_prev
    return loop_function
def bn(x, is_training):
    x_shape = x.get_shape()
    params_shape = x_shape[-1:]
    axis = list(range(len(x_shape) - 1))

    beta = _get_variable('beta', params_shape, initializer=tf.zeros_initializer())
    gamma = _get_variable('gamma', params_shape, initializer=tf.ones_initializer())

    moving_mean = _get_variable('moving_mean', params_shape,
                                initializer=tf.zeros_initializer(), trainable=False)
    moving_variance = _get_variable('moving_variance', params_shape,
                                    initializer=tf.ones_initializer(), trainable=False)

    # These ops will only be performed when training.
    mean, variance = tf.nn.moments(x, axis)
    update_moving_mean = moving_averages.assign_moving_average(moving_mean, mean, BN_DECAY)
    update_moving_variance = moving_averages.assign_moving_average(moving_variance, variance, BN_DECAY)
    tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_mean)
    tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_variance)

    mean, variance = control_flow_ops.cond(
        is_training, lambda: (mean, variance),
        lambda: (moving_mean, moving_variance))

    return tf.nn.batch_normalization(x, mean, variance, beta, gamma, BN_EPSILON)
def _step(time, sequence_length, min_sequence_length, max_sequence_length, zero_logit, generate_logit):
    # Step 1: determine whether we need to call_cell or not
    empty_update = lambda: zero_logit
    logit = control_flow_ops.cond(
        time < max_sequence_length, generate_logit, empty_update)

    # Step 2: determine whether we need to copy through state and/or outputs
    existing_logit = lambda: logit

    def copy_through():
        # Use broadcasting select to determine which values should get
        # the previous state & zero output, and which values should get
        # a calculated state & output.
        copy_cond = (time >= sequence_length)
        return tf.where(copy_cond, zero_logit, logit)

    logit = control_flow_ops.cond(
        time < min_sequence_length, existing_logit, copy_through)
    logit.set_shape(zero_logit.get_shape())
    return logit
def testTraversesControlInputs(self):
    dt1 = st.StochasticTensor(distributions.Normal(loc=0., scale=1.))
    logits = dt1.value() * 3.
    dt2 = st.StochasticTensor(distributions.Bernoulli(logits=logits))
    dt3 = st.StochasticTensor(distributions.Normal(loc=0., scale=1.))
    x = dt3.value()
    y = array_ops.ones((2, 2)) * 4.
    z = array_ops.ones((2, 2)) * 3.
    out = control_flow_ops.cond(
        math_ops.cast(dt2, dtypes.bool), lambda: math_ops.add(x, y),
        lambda: math_ops.square(z))

    out += 5.

    dep_map = sg._stochastic_dependencies_map([out])
    self.assertEqual(dep_map[dt1], set([out]))
    self.assertEqual(dep_map[dt2], set([out]))
    self.assertEqual(dep_map[dt3], set([out]))
def _safe_scalar_div(numerator, denominator, name):
    """Divides two values, returning 0 if the denominator is 0.

    Args:
      numerator: A scalar `float64` `Tensor`.
      denominator: A scalar `float64` `Tensor`.
      name: Name for the returned op.

    Returns:
      0 if `denominator` == 0, else `numerator` / `denominator`.
    """
    numerator.get_shape().with_rank_at_most(1)
    denominator.get_shape().with_rank_at_most(1)
    return control_flow_ops.cond(
        math_ops.equal(
            array_ops.constant(0.0, dtype=dtypes.float64), denominator),
        lambda: array_ops.constant(0.0, dtype=dtypes.float64),
        lambda: math_ops.div(numerator, denominator),
        name=name)
def _log_prob(self, event):
    # TODO(jaana): The current sigmoid_cross_entropy_with_logits has
    # inconsistent behavior for logits = inf/-inf.
    event = math_ops.cast(event, self.logits.dtype)
    logits = self.logits
    # sigmoid_cross_entropy_with_logits doesn't broadcast shape,
    # so we do this here.
    broadcast = lambda logits, event: (
        array_ops.ones_like(event) * logits,
        array_ops.ones_like(logits) * event)

    # First check static shape.
    if (event.get_shape().is_fully_defined() and
            logits.get_shape().is_fully_defined()):
        if event.get_shape() != logits.get_shape():
            logits, event = broadcast(logits, event)
    else:
        logits, event = control_flow_ops.cond(
            distribution_util.same_dynamic_shape(logits, event),
            lambda: (logits, event),
            lambda: broadcast(logits, event))
    return -nn.sigmoid_cross_entropy_with_logits(labels=event, logits=logits)
def testCond(self):
    """Tests that compilation handles switch operators."""
    with self.test_session() as session:
        x = array_ops.placeholder(dtypes.float32)
        y = array_ops.placeholder(dtypes.float32)
        c = array_ops.placeholder(dtypes.bool)
        with jit_scope():
            z = x + 1.0
            w = control_flow_ops.cond(c, lambda: z, lambda: y)
            t = math_ops.add(z, w)

        # If JIT compilation chooses to cluster z and t, then execution will
        # deadlock.
        run_metadata = config_pb2.RunMetadata()
        result = session.run(t, {x: np.float32(2),
                                 y: np.float32(4),
                                 c: True},
                             run_metadata=run_metadata,
                             options=config_pb2.RunOptions(
                                 trace_level=config_pb2.RunOptions.FULL_TRACE))
        self.assert_(MetadataHasXlaLaunch(run_metadata))
        self.assertAllClose(result, np.float32(6), rtol=1e-1)
def initialize(self, name=None):
    with ops.name_scope(name, "TrainingHelperInitialize"):
        finished = math_ops.equal(0, self._sequence_length)
        all_finished = math_ops.reduce_all(finished)
        next_inputs = control_flow_ops.cond(
            all_finished, lambda: self._zero_inputs,
            lambda: nest.map_structure(lambda inp: inp.read(0), self._input_tas))
        return (finished, next_inputs)
def next_inputs(self, time, outputs, state, name=None, **unused_kwargs):
    """next_inputs_fn for TrainingHelper."""
    with ops.name_scope(name, "TrainingHelperNextInputs",
                        [time, outputs, state]):
        next_time = time + 1
        finished = (next_time >= self._sequence_length)
        all_finished = math_ops.reduce_all(finished)

        def read_from_ta(inp):
            return inp.read(next_time)

        next_inputs = control_flow_ops.cond(
            all_finished, lambda: self._zero_inputs,
            lambda: nest.map_structure(read_from_ta, self._input_tas))
        return (finished, next_inputs, state)
def next_inputs(self, time, outputs, state, sample_ids, name=None):
    with ops.name_scope(name, "ScheduledEmbeddingTrainingHelperSample",
                        [time, outputs, state, sample_ids]):
        (finished, base_next_inputs, state) = (
            super(ScheduledEmbeddingTrainingHelper, self).next_inputs(
                time=time,
                outputs=outputs,
                state=state,
                sample_ids=sample_ids,
                name=name))

        def maybe_sample():
            """Perform scheduled sampling."""
            where_sampling = math_ops.cast(
                array_ops.where(sample_ids > -1), dtypes.int32)
            where_not_sampling = math_ops.cast(
                array_ops.where(sample_ids <= -1), dtypes.int32)
            where_sampling_flat = array_ops.reshape(where_sampling, [-1])
            where_not_sampling_flat = array_ops.reshape(where_not_sampling, [-1])
            sample_ids_sampling = array_ops.gather(sample_ids, where_sampling_flat)
            inputs_not_sampling = array_ops.gather(
                base_next_inputs, where_not_sampling_flat)
            sampled_next_inputs = self._embedding_fn(sample_ids_sampling)
            base_shape = array_ops.shape(base_next_inputs)
            return (array_ops.scatter_nd(indices=where_sampling,
                                         updates=sampled_next_inputs,
                                         shape=base_shape)
                    + array_ops.scatter_nd(indices=where_not_sampling,
                                           updates=inputs_not_sampling,
                                           shape=base_shape))

        all_finished = math_ops.reduce_all(finished)
        next_inputs = control_flow_ops.cond(
            all_finished, lambda: base_next_inputs, maybe_sample)
        return (finished, next_inputs, state)
def next_inputs(self, time, outputs, state, sample_ids, name=None):
    """next_inputs_fn for GreedyEmbeddingHelper."""
    del time, outputs  # unused by next_inputs_fn
    finished = math_ops.equal(sample_ids, self._end_token)
    all_finished = math_ops.reduce_all(finished)
    next_inputs = control_flow_ops.cond(
        all_finished,
        # If we're finished, the next_inputs value doesn't matter
        lambda: self._start_inputs,
        lambda: self._embedding_fn(sample_ids))
    return (finished, next_inputs, state)
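The guard shared by these helper methods, a reduce_all over per-sequence finished flags feeding a cond, can be sketched in isolation. This is a minimal illustration under an assumed TF 1.x graph mode; the shapes and dummy branches are made up:

import tensorflow as tf
from tensorflow.python.ops import control_flow_ops, math_ops

finished = tf.placeholder(tf.bool, shape=[4])     # per-sequence "done" flags for a batch of 4
all_finished = math_ops.reduce_all(finished)

zero_inputs = tf.zeros([4, 8])                    # cheap dummy inputs once everything is done
def compute_next_inputs():
    return tf.random_uniform([4, 8])              # stands in for an embedding lookup

next_inputs = control_flow_ops.cond(all_finished,
                                    lambda: zero_inputs,
                                    compute_next_inputs)

with tf.Session() as sess:
    sess.run(next_inputs, feed_dict={finished: [True, True, True, True]})   # dummy branch
    sess.run(next_inputs, feed_dict={finished: [True, False, True, True]})  # lookup branch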
def _cond(condition, then_lambda, else_lambda):
    '''Backwards compatible interface to tf.cond prior to public introduction.
    '''
    try:
        cond_fn = tf.cond
    except AttributeError:
        from tensorflow.python.ops import control_flow_ops
        cond_fn = control_flow_ops.cond
    return cond_fn(condition, then_lambda, else_lambda)
def flip_randomly_left_right_image_with_annotation(image_tensor, annotation_tensor):
    """Accepts image tensor and annotation tensor and returns randomly flipped tensors of both.

    The function performs a random flip of the image and annotation tensors with
    probability 1/2. The flip is performed or not performed for the image and the
    annotation consistently, so that the annotation matches the image.

    Parameters
    ----------
    image_tensor : Tensor of size (width, height, 3)
        Tensor with image
    annotation_tensor : Tensor of size (width, height, 1)
        Tensor with annotation

    Returns
    -------
    randomly_flipped_img : Tensor of size (width, height, 3) of type tf.float.
        Randomly flipped image tensor
    randomly_flipped_annotation : Tensor of size (width, height, 1)
        Randomly flipped annotation tensor
    """
    # Random variable: two possible outcomes (0 or 1), each with probability 1/2.
    random_var = tf.random_uniform(maxval=2, dtype=tf.int32, shape=[])

    randomly_flipped_img = control_flow_ops.cond(
        pred=tf.equal(random_var, 0),
        fn1=lambda: tf.image.flip_left_right(image_tensor),
        fn2=lambda: image_tensor)

    randomly_flipped_annotation = control_flow_ops.cond(
        pred=tf.equal(random_var, 0),
        fn1=lambda: tf.image.flip_left_right(annotation_tensor),
        fn2=lambda: annotation_tensor)

    return randomly_flipped_img, randomly_flipped_annotation
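A hedged usage sketch for the function above (assumes the `control_flow_ops` import the function relies on and TF 1.x graph mode; the shapes and dummy tensors are illustrative):

import tensorflow as tf

image = tf.random_uniform([128, 128, 3])                  # dummy RGB image
annotation = tf.random_uniform([128, 128, 1], maxval=2.)  # dummy mask stand-in

flipped_image, flipped_annotation = flip_randomly_left_right_image_with_annotation(
    image, annotation)

with tf.Session() as sess:
    # Both outputs depend on the same random_var, so they are flipped (or not) together.
    img_out, ann_out = sess.run([flipped_image, flipped_annotation])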
def next_inputs(self, sample_ids, name=None):
    finished = math_ops.equal(sample_ids, self.config.eos_token)
    all_finished = math_ops.reduce_all(finished)
    next_inputs = control_flow_ops.cond(
        all_finished,
        # If we're finished, the next_inputs value doesn't matter
        lambda: tf.nn.embedding_lookup(
            self.target_embedding,
            tf.tile([self.config.eos_token], [self.config.beam_width])),
        lambda: tf.nn.embedding_lookup(self.target_embedding, sample_ids))
    return all_finished, next_inputs
def bn(x, c):
    x_shape = x.get_shape()
    params_shape = x_shape[-1:]
    axis = list(range(len(x_shape) - 1))

    beta = _get_variable('beta', params_shape,
                         initializer=tf.zeros_initializer())  # tf.constant_initializer(0.00, dtype='float')
    gamma = _get_variable('gamma', params_shape,
                          initializer=tf.ones_initializer())

    moving_mean = _get_variable('moving_mean', params_shape,
                                initializer=tf.zeros_initializer(), trainable=False)
    moving_variance = _get_variable('moving_variance', params_shape,
                                    initializer=tf.ones_initializer(), trainable=False)

    # These ops will only be performed when training.
    mean, variance = tf.nn.moments(x, axis)
    update_moving_mean = moving_averages.assign_moving_average(moving_mean, mean, BN_DECAY)
    update_moving_variance = moving_averages.assign_moving_average(
        moving_variance, variance, BN_DECAY)
    tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_mean)
    tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_variance)

    mean, variance = control_flow_ops.cond(
        c['is_training'], lambda: (mean, variance),
        lambda: (moving_mean, moving_variance))

    x = tf.nn.batch_normalization(x, mean, variance, beta, gamma, BN_EPSILON)
    return x


# wrapper for get_variable op
def batch_norm(x, phase_train):
    """Batch normalization on convolutional maps.

    Args:
        x: Tensor, 4D BHWD input maps
        phase_train: boolean tf.Variable, true indicates training phase
    Return:
        normed: batch-normalized maps
    Ref: http://stackoverflow.com/questions/33949786/how-could-i-use-batch-normalization-in-tensorflow/33950177
    """
    name = 'batch_norm'
    with tf.variable_scope(name):
        phase_train = tf.convert_to_tensor(phase_train, dtype=tf.bool)
        n_out = int(x.get_shape()[3])
        beta = tf.Variable(tf.constant(0.0, shape=[n_out], dtype=x.dtype),
                           name=name + '/beta', trainable=True, dtype=x.dtype)
        gamma = tf.Variable(tf.constant(1.0, shape=[n_out], dtype=x.dtype),
                            name=name + '/gamma', trainable=True, dtype=x.dtype)

        batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name='moments')
        ema = tf.train.ExponentialMovingAverage(decay=0.9)

        def mean_var_with_update():
            ema_apply_op = ema.apply([batch_mean, batch_var])
            with tf.control_dependencies([ema_apply_op]):
                return tf.identity(batch_mean), tf.identity(batch_var)

        mean, var = control_flow_ops.cond(phase_train,
                                          mean_var_with_update,
                                          lambda: (ema.average(batch_mean), ema.average(batch_var)))
        normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)
    return normed
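A hedged usage sketch for the helper above (assumes TF 1.x graph mode; the feature-map shape and feed values are illustrative):

import numpy as np
import tensorflow as tf

feature_maps = tf.placeholder(tf.float32, [None, 32, 32, 16])
phase_train = tf.placeholder(tf.bool, shape=[], name='phase_train')
normed = batch_norm(feature_maps, phase_train)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    batch = np.random.rand(8, 32, 32, 16).astype(np.float32)
    sess.run(normed, feed_dict={feature_maps: batch, phase_train: True})   # batch statistics + EMA update
    sess.run(normed, feed_dict={feature_maps: batch, phase_train: False})  # stored moving averages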
def _bn(self, x, params_init, is_training):
    x_shape = x.get_shape()
    axis = list(range(len(x_shape) - 1))

    beta = self._get_variable_const('beta',
                                    initializer=tf.constant(params_init['bias']))
    gamma = self._get_variable_const('gamma',
                                     initializer=tf.constant(params_init['weight']))
    moving_mean = self._get_variable_const('moving_mean',
                                           initializer=tf.constant(params_init['running_mean']),
                                           trainable=False)
    moving_variance = self._get_variable_const('moving_variance',
                                               initializer=tf.constant(params_init['running_var']),
                                               trainable=False)

    # mean, variance = tf.nn.moments(x, axis)
    # update_moving_mean = moving_averages.assign_moving_average(moving_mean, mean, BN_DECAY)
    # update_moving_variance = moving_averages.assign_moving_average(moving_variance, variance, BN_DECAY)
    # tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_mean)
    # tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_variance)
    #
    # if ~is_training:
    #     mean = moving_mean
    #     variance = moving_variance
    # else:
    #     ema = tf.train.ExponentialMovingAverage(decay=BN_DECAY)
    #
    #     def mean_var_with_update():
    #         ema_apply_op = ema.apply([mean, variance])
    #         with tf.control_dependencies([ema_apply_op]):
    #             return tf.identity(mean), tf.identity(variance)
    #     mean, variance = mean_var_with_update()
    # mean, variance = control_flow_ops.cond(is_training, lambda: (mean, variance),
    #                                        lambda: (moving_mean, moving_variance))
    # x = tf.nn.batch_normalization(x, mean, variance, beta, gamma, BN_EPSILON)

    x = tf.layers.batch_normalization(
        x,
        momentum=BN_DECAY,
        epsilon=BN_EPSILON,
        beta_initializer=tf.constant_initializer(params_init['bias']),
        gamma_initializer=tf.constant_initializer(params_init['weight']),
        moving_mean_initializer=tf.constant_initializer(params_init['running_mean']),
        moving_variance_initializer=tf.constant_initializer(params_init['running_var']),
        training=is_training)
    return x
def _cond(condition, then_lambda, else_lambda):
    """Backwards compatible interface to tf.cond prior to public introduction.
    """
    try:
        cond_fn = tf.cond
    except AttributeError:
        from tensorflow.python.ops import control_flow_ops
        cond_fn = control_flow_ops.cond
    return cond_fn(condition, then_lambda, else_lambda)
def tree_initialization(self):
    def _init_tree():
        return state_ops.scatter_update(self.variables.tree, [0], [[-1, -1]]).op

    def _nothing():
        return control_flow_ops.no_op()

    return control_flow_ops.cond(
        math_ops.equal(array_ops.squeeze(array_ops.slice(
            self.variables.tree, [0, 0], [1, 1])), -2),
        _init_tree, _nothing)
def switch(condition, then_expression, else_expression):
    """Switches between two operations depending on a scalar value.

    Note that both `then_expression` and `else_expression`
    should be symbolic tensors of the *same shape*.

    Arguments:
        condition: scalar tensor (`int` or `bool`).
        then_expression: either a tensor, or a callable that returns a tensor.
        else_expression: either a tensor, or a callable that returns a tensor.

    Returns:
        The selected tensor.
    """
    if condition.dtype != dtypes_module.bool:
        condition = math_ops.cast(condition, 'bool')
    if not callable(then_expression):
        def then_expression_fn():
            return then_expression
    else:
        then_expression_fn = then_expression
    if not callable(else_expression):
        def else_expression_fn():
            return else_expression
    else:
        else_expression_fn = else_expression
    x = control_flow_ops.cond(condition,
                              then_expression_fn,
                              else_expression_fn)
    return x
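A hedged usage sketch for `switch` (assumes the Keras-backend-style module imports used by the function above and TF 1.x graph mode; the tensors and values are illustrative):

import tensorflow as tf

condition = tf.placeholder(tf.bool, shape=[])
ones = tf.fill([2, 2], 1.0)
minus_ones = tf.fill([2, 2], -1.0)

# Either expression may be a tensor or a zero-argument callable returning one.
selected = switch(condition, ones, lambda: minus_ones)

with tf.Session() as sess:
    print(sess.run(selected, feed_dict={condition: True}))   # 2x2 block of  1.0
    print(sess.run(selected, feed_dict={condition: False}))  # 2x2 block of -1.0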