The following 44 code examples, extracted from open-source Python projects, show how to use tensorflow.python.ops.math_ops.less().
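Before the project examples, here is a minimal orientation sketch (assuming TF 1.x graph mode, which all of the snippets below use): `math_ops.less` is the op behind `tf.less` and the `<` operator, returning an element-wise boolean tensor with broadcasting.

```python
import tensorflow as tf
from tensorflow.python.ops import math_ops

# Element-wise comparison; shapes broadcast like other binary ops.
x = tf.constant([1, 4, 7])
y = tf.constant(5)
mask = math_ops.less(x, y)  # equivalent to `x < y` or `tf.less(x, y)`

with tf.Session() as sess:
    print(sess.run(mask))  # [ True  True False]
```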
```python
def _reset(self):
    """Resets wait counter and cooldown counter."""
    if self.mode not in ['auto', 'min', 'max']:
        logging.warning('Learning Rate Plateau Reducing mode %s is unknown, '
                        'fallback to auto mode.' % (self.mode))
        self.mode = 'auto'
    if (self.mode == 'min' or
        (self.mode == 'auto' and 'acc' not in self.monitor)):
        self.monitor_op = lambda a, b: np.less(a, b - self.epsilon)
        self.best = np.Inf
    else:
        self.monitor_op = lambda a, b: np.greater(a, b + self.epsilon)
        self.best = -np.Inf
    self.cooldown_counter = 0
    self.wait = 0
    self.lr_epsilon = self.min_lr * 1e-4
```
```python
def _padding_mask(sequence_lengths, padded_length):
    """Creates a mask used for calculating losses with padded input.

    Args:
      sequence_lengths: a `Tensor` of shape `[batch_size]` containing the
        unpadded length of each sequence.
      padded_length: a scalar `Tensor` indicating the length of the sequences
        after padding.

    Returns:
      A boolean `Tensor` M of shape `[batch_size, padded_length]` where
      `M[i, j] == True` when `lengths[i] > j`.
    """
    range_tensor = math_ops.range(padded_length)
    return math_ops.less(array_ops.expand_dims(range_tensor, 0),
                         array_ops.expand_dims(sequence_lengths, 1))
```
```python
def padding_mask(sequence_lengths, padded_length):
    """Creates a mask used for calculating losses with padded input.

    Args:
      sequence_lengths: A `Tensor` of shape `[batch_size]` containing the
        unpadded length of each sequence.
      padded_length: A scalar `Tensor` indicating the length of the sequences
        after padding.

    Returns:
      A boolean `Tensor` M of shape `[batch_size, padded_length]` where
      `M[i, j] == True` when `lengths[i] > j`.
    """
    range_tensor = math_ops.range(padded_length)
    return math_ops.less(array_ops.expand_dims(range_tensor, 0),
                         array_ops.expand_dims(sequence_lengths, 1))
```
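To make the broadcasting in this mask concrete, here is a small hedged run of the same construction (the tensor values are illustrative, not from the original project):

```python
import tensorflow as tf
from tensorflow.python.ops import array_ops, math_ops

lengths = tf.constant([1, 3])     # unpadded lengths, batch_size = 2
padded_length = tf.constant(4)

# Row vector [0, 1, 2, 3] compared against column vector [[1], [3]]:
# broadcasting yields a [2, 4] boolean mask.
mask = math_ops.less(array_ops.expand_dims(tf.range(padded_length), 0),
                     array_ops.expand_dims(lengths, 1))

with tf.Session() as sess:
    print(sess.run(mask))
    # [[ True False False False]
    #  [ True  True  True False]]
```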
```python
def random_flip_left_right(image, bboxes, seed=None):
    """Random flip left-right of an image and its bounding boxes."""
    def flip_bboxes(bboxes):
        """Flip bounding boxes coordinates."""
        bboxes = tf.stack([bboxes[:, 0], 1 - bboxes[:, 3],
                           bboxes[:, 2], 1 - bboxes[:, 1]], axis=-1)
        return bboxes

    # Random flip. Tensorflow implementation.
    with tf.name_scope('random_flip_left_right'):
        image = ops.convert_to_tensor(image, name='image')
        _Check3DImage(image, require_static=False)
        uniform_random = random_ops.random_uniform([], 0, 1.0, seed=seed)
        mirror_cond = math_ops.less(uniform_random, .5)
        # Flip image.
        result = control_flow_ops.cond(mirror_cond,
                                       lambda: array_ops.reverse_v2(image, [1]),
                                       lambda: image)
        # Flip bboxes.
        bboxes = control_flow_ops.cond(mirror_cond,
                                       lambda: flip_bboxes(bboxes),
                                       lambda: bboxes)
        return fix_image_flip_shape(image, result), bboxes
```
```python
def testCorrectlyPicksVector(self):
    with self.test_session():
        x = np.arange(10, 12)
        y = np.arange(15, 18)
        self.assertAllEqual(x, distribution_util.pick_vector(
            math_ops.less(0, 5), x, y).eval())
        self.assertAllEqual(y, distribution_util.pick_vector(
            math_ops.less(5, 0), x, y).eval())
        self.assertAllEqual(x, distribution_util.pick_vector(
            constant_op.constant(True), x, y))  # No eval.
        self.assertAllEqual(y, distribution_util.pick_vector(
            constant_op.constant(False), x, y))  # No eval.
```
```python
def sample(self):
    u = tf.random_uniform(tf.shape(self.ps))
    return tf.to_float(math_ops.less(u, self.ps))
```
```python
def random_flip_left_right(image, seed=None):
    # Note: this uses the pre-TF-1.0 API, where `tf.pack` (later renamed
    # `tf.stack`) builds a per-dimension flag vector and `tf.reverse` takes a
    # boolean mask per axis rather than a list of axis indices.
    uniform_random = random_ops.random_uniform([], 0, 1.0, seed=seed)
    mirror = math_ops.less(tf.pack([1.0, 1.0, uniform_random, 1.0]), 0.5)
    return tf.reverse(image, mirror)
```
```python
def random_flip_up_down(image, seed=None):
    uniform_random = random_ops.random_uniform([], 0, 1.0, seed=seed)
    mirror = math_ops.less(tf.pack([1.0, uniform_random, 1.0, 1.0]), 0.5)
    return tf.reverse(image, mirror)
```
```python
def random_flip_left_right(image, seed=None):
    uniform_random = random_ops.random_uniform([], 0, 1.0, seed=seed)
    mirror = math_ops.less(tf.pack([1.0, 1.0, uniform_random, 1.0]), 0.5)
    return tf.reverse(image, mirror)
```
```python
def random_flip_up_down(image, seed=None):
    uniform_random = random_ops.random_uniform([], 0, 1.0, seed=seed)
    mirror = math_ops.less(tf.pack([1.0, uniform_random, 1.0, 1.0]), 0.5)
    return tf.reverse(image, mirror)
```
```python
def less(x, y):
    """Element-wise truth value of (x < y).

    Arguments:
        x: Tensor or variable.
        y: Tensor or variable.

    Returns:
        A bool tensor.
    """
    return math_ops.less(x, y)
```
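The wrapper above is what backs the Keras backend's public entry point for this op. A minimal usage sketch, assuming `tf.keras` is available (TF 1.4+; the module path may differ in very old releases):

```python
import tensorflow as tf

K = tf.keras.backend
a = K.constant([1.0, 2.0, 3.0])
b = K.constant([2.0, 2.0, 2.0])
lt = K.less(a, b)  # element-wise bool tensor: [True, False, False]

with tf.Session() as sess:
    print(sess.run(lt))
```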
```python
def __init__(self, filepath, monitor='val_loss', verbose=0,
             save_best_only=False, save_weights_only=False,
             mode='auto', period=1):
    super(ModelCheckpoint, self).__init__()
    self.monitor = monitor
    self.verbose = verbose
    self.filepath = filepath
    self.save_best_only = save_best_only
    self.save_weights_only = save_weights_only
    self.period = period
    self.epochs_since_last_save = 0

    if mode not in ['auto', 'min', 'max']:
        logging.warning('ModelCheckpoint mode %s is unknown, '
                        'fallback to auto mode.' % (mode))
        mode = 'auto'

    if mode == 'min':
        self.monitor_op = np.less
        self.best = np.Inf
    elif mode == 'max':
        self.monitor_op = np.greater
        self.best = -np.Inf
    else:
        if 'acc' in self.monitor or self.monitor.startswith('fmeasure'):
            self.monitor_op = np.greater
            self.best = -np.Inf
        else:
            self.monitor_op = np.less
            self.best = np.Inf
```
```python
def __init__(self, monitor='val_loss', min_delta=0, patience=0,
             verbose=0, mode='auto'):
    super(EarlyStopping, self).__init__()
    self.monitor = monitor
    self.patience = patience
    self.verbose = verbose
    self.min_delta = min_delta
    self.wait = 0
    self.stopped_epoch = 0

    if mode not in ['auto', 'min', 'max']:
        # Use the local `mode` here; the original referenced `self.mode`,
        # which is never assigned and would raise AttributeError.
        logging.warning('EarlyStopping mode %s is unknown, '
                        'fallback to auto mode.' % (mode))
        mode = 'auto'

    if mode == 'min':
        self.monitor_op = np.less
    elif mode == 'max':
        self.monitor_op = np.greater
    else:
        if 'acc' in self.monitor or self.monitor.startswith('fmeasure'):
            self.monitor_op = np.greater
        else:
            self.monitor_op = np.less

    if self.monitor_op == np.greater:
        self.min_delta *= 1
    else:
        self.min_delta *= -1
```
```python
def on_train_begin(self, logs=None):
    # Allow instances to be re-used
    self.wait = 0
    self.stopped_epoch = 0
    self.best = np.Inf if self.monitor_op == np.less else -np.Inf
```
```python
def random_flip_left_right(image, bboxes, seed=None):
    """Random flip left-right of an image and its bounding boxes."""
    def flip_bboxes(bboxes):
        """Flip bounding boxes coordinates."""
        bboxes = tf.stack([bboxes[:, 0], 1 - bboxes[:, 3],
                           bboxes[:, 2], 1 - bboxes[:, 1]], axis=-1)
        return bboxes

    # Random flip. Tensorflow implementation.
    with tf.name_scope('random_flip_left_right'):
        image = ops.convert_to_tensor(image, name='image')
        _Check3DImage(image, require_static=False)
        uniform_random = random_ops.random_uniform([], 0, 1.0, seed=seed)
        mirror_cond = math_ops.less(uniform_random, .5)
        # debugging info
        # mirror_cond = tf.Print(mirror_cond, [mirror_cond], 'flipped image')
        # Flip image.
        result = control_flow_ops.cond(mirror_cond,
                                       lambda: array_ops.reverse_v2(image, [1]),
                                       lambda: image)
        # Flip bboxes.
        bboxes = control_flow_ops.cond(mirror_cond,
                                       lambda: flip_bboxes(bboxes),
                                       lambda: bboxes)
        return fix_image_flip_shape(image, result), bboxes
```
```python
def _mode(self):
    s = self.df - self.dimension - 1.
    s = math_ops.select(
        math_ops.less(s, 0.),
        constant_op.constant(float("NaN"), dtype=self.dtype, name="nan"),
        s)
    if self.cholesky_input_output_matrices:
        return math_ops.sqrt(s) * self.scale_operator_pd.sqrt_to_dense()
    return s * self.scale_operator_pd.to_dense()
```
```python
def _sample_n(self, n, seed=None):
    new_shape = array_ops.concat(0, ([n], self.batch_shape()))
    uniform = random_ops.random_uniform(
        new_shape, seed=seed, dtype=self.p.dtype)
    sample = math_ops.less(uniform, self.p)
    return math_ops.cast(sample, self.dtype)
```
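The comparison `uniform < p` is the standard inverse-CDF trick for Bernoulli sampling: a Uniform(0, 1) draw falls below `p` with probability exactly `p`. A standalone sketch in the same TF 1.x style (variable names here are illustrative, not from the original class):

```python
import tensorflow as tf
from tensorflow.python.ops import math_ops, random_ops

p = tf.constant([0.1, 0.5, 0.9])            # per-component success probabilities
u = random_ops.random_uniform(tf.shape(p))  # U(0, 1) draws
bernoulli_sample = tf.cast(math_ops.less(u, p), tf.int32)

with tf.Session() as sess:
    print(sess.run(bernoulli_sample))  # e.g. [0, 1, 1]
```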
```python
def __lt__(self, other):
    return less(self, other)
```
```python
def _flip_vector_to_matrix_dynamic(vec, batch_shape):
    """flip_vector_to_matrix with dynamic shapes."""
    # Shapes associated with batch_shape
    batch_rank = array_ops.size(batch_shape)

    # Shapes associated with vec.
    vec = ops.convert_to_tensor(vec, name="vec")
    vec_shape = array_ops.shape(vec)
    vec_rank = array_ops.rank(vec)
    vec_batch_rank = vec_rank - 1

    m = vec_batch_rank - batch_rank
    # vec_shape_left = [M1,...,Mm] or [].
    vec_shape_left = array_ops.slice(vec_shape, [0], [m])
    # If vec_shape_left = [], then condensed_shape = [1] since reduce_prod([]) = 1
    # If vec_shape_left = [M1,...,Mm], condensed_shape = [M1*...*Mm]
    condensed_shape = [math_ops.reduce_prod(vec_shape_left)]
    k = array_ops.gather(vec_shape, vec_rank - 1)
    new_shape = array_ops.concat(0, (batch_shape, [k], condensed_shape))

    def _flip_front_dims_to_back():
        # Permutation corresponding to [N1,...,Nn] + [k, M1,...,Mm]
        perm = array_ops.concat(
            0, (math_ops.range(m, vec_rank), math_ops.range(0, m)))
        return array_ops.transpose(vec, perm=perm)

    x_flipped = control_flow_ops.cond(
        math_ops.less(0, m),
        _flip_front_dims_to_back,
        lambda: array_ops.expand_dims(vec, -1))

    return array_ops.reshape(x_flipped, new_shape)
```
```python
def setUp(self):
    super(CoreBinaryOpsTest, self).setUp()

    self.x_probs_broadcast_tensor = array_ops.reshape(
        self.x_probs_lt.tensor, [self.x_size, 1, self.probs_size])

    self.channel_probs_broadcast_tensor = array_ops.reshape(
        self.channel_probs_lt.tensor, [1, self.channel_size, self.probs_size])

    # == and != are not element-wise for tf.Tensor, so they shouldn't be
    # elementwise for LabeledTensor, either.
    self.ops = [
        ('add', operator.add, math_ops.add, core.add),
        ('sub', operator.sub, math_ops.subtract, core.sub),
        ('mul', operator.mul, math_ops.multiply, core.mul),
        ('div', operator.truediv, math_ops.div, core.div),
        ('mod', operator.mod, math_ops.mod, core.mod),
        ('pow', operator.pow, math_ops.pow, core.pow_function),
        ('equal', None, math_ops.equal, core.equal),
        ('less', operator.lt, math_ops.less, core.less),
        ('less_equal', operator.le, math_ops.less_equal, core.less_equal),
        ('not_equal', None, math_ops.not_equal, core.not_equal),
        ('greater', operator.gt, math_ops.greater, core.greater),
        ('greater_equal', operator.ge, math_ops.greater_equal,
         core.greater_equal),
    ]
    self.test_lt_1 = self.x_probs_lt
    self.test_lt_2 = self.channel_probs_lt
    self.test_lt_1_broadcast = self.x_probs_broadcast_tensor
    self.test_lt_2_broadcast = self.channel_probs_broadcast_tensor
    self.broadcast_axes = [self.a0, self.a1, self.a3]
```
```python
def test_reflexive(self):
    labeled_tensor = self.x_probs_lt + 1  # all elements must be >0 for division
    for op_name, infix_op, _, lt_op in self.ops:
        if infix_op is not None:
            expected_lt = lt_op(2, labeled_tensor)
            actual_lt = infix_op(2, labeled_tensor)
            # Python uses greater for the reflexive version of less
            # (and vice versa).
            if 'less' in op_name:
                op_name = op_name.replace('less', 'greater')
            elif 'greater' in op_name:
                op_name = op_name.replace('greater', 'less')
            self.assertIn(op_name, actual_lt.name)
            self.assertLabeledTensorsEqual(expected_lt, actual_lt)
```
```python
def _mode(self):
    s = self.df - self.dimension - 1.
    s = array_ops.where(
        math_ops.less(s, 0.),
        constant_op.constant(float("NaN"), dtype=self.dtype, name="nan"),
        s)
    if self.cholesky_input_output_matrices:
        return math_ops.sqrt(s) * self.scale_operator_pd.sqrt_to_dense()
    return s * self.scale_operator_pd.to_dense()
```
```python
def _flip_vector_to_matrix_dynamic(vec, batch_shape):
    """flip_vector_to_matrix with dynamic shapes."""
    # Shapes associated with batch_shape
    batch_rank = array_ops.size(batch_shape)

    # Shapes associated with vec.
    vec = ops.convert_to_tensor(vec, name="vec")
    vec_shape = array_ops.shape(vec)
    vec_rank = array_ops.rank(vec)
    vec_batch_rank = vec_rank - 1

    m = vec_batch_rank - batch_rank
    # vec_shape_left = [M1,...,Mm] or [].
    vec_shape_left = array_ops.strided_slice(vec_shape, [0], [m])
    # If vec_shape_left = [], then condensed_shape = [1] since reduce_prod([]) = 1
    # If vec_shape_left = [M1,...,Mm], condensed_shape = [M1*...*Mm]
    condensed_shape = [math_ops.reduce_prod(vec_shape_left)]
    k = array_ops.gather(vec_shape, vec_rank - 1)
    new_shape = array_ops.concat((batch_shape, [k], condensed_shape), 0)

    def _flip_front_dims_to_back():
        # Permutation corresponding to [N1,...,Nn] + [k, M1,...,Mm]
        perm = array_ops.concat(
            (math_ops.range(m, vec_rank), math_ops.range(0, m)), 0)
        return array_ops.transpose(vec, perm=perm)

    x_flipped = control_flow_ops.cond(
        math_ops.less(0, m),
        _flip_front_dims_to_back,
        lambda: array_ops.expand_dims(vec, -1))

    return array_ops.reshape(x_flipped, new_shape)
```
```python
def _cdf(self, x):
    # Take Abs(scale) to make subsequent where work correctly.
    y = (x - self.loc) / math_ops.abs(self.scale)
    x_t = self.df / (y**2. + self.df)
    neg_cdf = 0.5 * math_ops.betainc(0.5 * self.df, 0.5, x_t)
    return array_ops.where(math_ops.less(y, 0.), neg_cdf, 1. - neg_cdf)
```
```python
def _ndtr(x):
    """Implements ndtr core logic."""
    half_sqrt_2 = constant_op.constant(
        0.5 * math.sqrt(2.), dtype=x.dtype, name="half_sqrt_2")
    w = x * half_sqrt_2
    z = math_ops.abs(w)
    y = array_ops.where(math_ops.less(z, half_sqrt_2),
                        1. + math_ops.erf(w),
                        array_ops.where(math_ops.greater(w, 0.),
                                        2. - math_ops.erfc(z),
                                        math_ops.erfc(z)))
    return 0.5 * y
```
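`ndtr` is the standard normal CDF, Phi(x) = 0.5 * (1 + erf(x / sqrt(2))); the nested `where` above just selects whichever of `erf`/`erfc` is better-conditioned for the magnitude of `w`. A quick spot-check of the underlying identity, as a sketch (values illustrative):

```python
import math
import tensorflow as tf

# Phi(x) = 0.5 * (1 + erf(x / sqrt(2))); check a few reference values.
x = tf.constant([-2.0, 0.0, 2.0])
phi = 0.5 * (1.0 + tf.erf(x / math.sqrt(2.0)))

with tf.Session() as sess:
    print(sess.run(phi))  # approx [0.02275, 0.5, 0.97725]
```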
```python
def testGetLogitsAndProbsProbabilityValidateArgs(self):
    p = [0.01, 0.2, 0.5, 0.7, .99]
    # Component less than 0.
    p2 = [-1, 0.2, 0.5, 0.3, .2]
    # Component greater than 1.
    p3 = [2, 0.2, 0.5, 0.3, .2]

    with self.test_session():
        _, prob = distribution_util.get_logits_and_probs(
            probs=p, validate_args=True)
        prob.eval()

        with self.assertRaisesOpError("Condition x >= 0"):
            _, prob = distribution_util.get_logits_and_probs(
                probs=p2, validate_args=True)
            prob.eval()

        _, prob = distribution_util.get_logits_and_probs(
            probs=p2, validate_args=False)
        prob.eval()

        with self.assertRaisesOpError("probs has components greater than 1"):
            _, prob = distribution_util.get_logits_and_probs(
                probs=p3, validate_args=True)
            prob.eval()

        _, prob = distribution_util.get_logits_and_probs(
            probs=p3, validate_args=False)
        prob.eval()
```
```python
def testGetLogitsAndProbsProbabilityValidateArgsMultidimensional(self):
    p = np.array([[0.3, 0.4, 0.3], [0.1, 0.5, 0.4]], dtype=np.float32)
    # Component less than 0. Still sums to 1.
    p2 = np.array([[-.3, 0.4, 0.9], [0.1, 0.5, 0.4]], dtype=np.float32)
    # Component greater than 1. Does not sum to 1.
    p3 = np.array([[1.3, 0.0, 0.0], [0.1, 0.5, 0.4]], dtype=np.float32)
    # Does not sum to 1.
    p4 = np.array([[1.1, 0.3, 0.4], [0.1, 0.5, 0.4]], dtype=np.float32)

    with self.test_session():
        _, prob = distribution_util.get_logits_and_probs(
            probs=p, multidimensional=True)
        prob.eval()

        with self.assertRaisesOpError("Condition x >= 0"):
            _, prob = distribution_util.get_logits_and_probs(
                probs=p2, multidimensional=True, validate_args=True)
            prob.eval()

        _, prob = distribution_util.get_logits_and_probs(
            probs=p2, multidimensional=True, validate_args=False)
        prob.eval()

        with self.assertRaisesOpError(
            "(probs has components greater than 1|probs does not sum to 1)"):
            _, prob = distribution_util.get_logits_and_probs(
                probs=p3, multidimensional=True, validate_args=True)
            prob.eval()

        _, prob = distribution_util.get_logits_and_probs(
            probs=p3, multidimensional=True, validate_args=False)
        prob.eval()

        with self.assertRaisesOpError("probs does not sum to 1"):
            _, prob = distribution_util.get_logits_and_probs(
                probs=p4, multidimensional=True, validate_args=True)
            prob.eval()

        _, prob = distribution_util.get_logits_and_probs(
            probs=p4, multidimensional=True, validate_args=False)
        prob.eval()
```
```python
def testLoops(self):
    """Tests that loops work on XLA devices."""
    with session_lib.Session() as session:
        x = array_ops.placeholder(dtypes.float32)
        with ops.device("device:XLA_CPU:0"):
            c = lambda i, _: math_ops.less(i, 5)
            b = lambda i, x: (i + 1, x * 2.0 + 1.0)
            _, y = control_flow_ops.while_loop(c, b, (constant_op.constant(0), x))
        result = session.run(y, {x: np.float32(2)})
        self.assertAllClose(result, np.float32(95), rtol=1e-3)
```
```python
def MetadataHasXlaLaunch(run_metadata):
    """Returns true if there is a _XlaLaunch kernel in run_metadata's timeline."""
    # TODO(phawkins): find a less hacky way to test whether a kernel ran.
    return InLabels(RunMetadataLabels(run_metadata), "_XlaLaunch")
```
```python
def testLoopDeadlock(self):
    """Regression test for bug that caused deadlocks in graphs with loops."""
    with self.test_session() as session:
        x = array_ops.placeholder(dtypes.float32)
        with jit_scope():
            y = x + 1.0
            c = lambda i, _x, _y: math_ops.less(i, 5)
            b = lambda i, x, _y: (i + 1, x * 2.0 + 1.0, x - 3.0)
            _, _, w = control_flow_ops.while_loop(
                c, b, (constant_op.constant(0), y, x))
            u = w + y
        result = session.run(u, {x: np.float32(2)})
        self.assertAllClose(result, np.float32(63), rtol=1e-1)
```
```python
def training_graph(self, input_data, input_labels, data_spec=None,
                   epoch=None, **tree_kwargs):
    """Constructs a TF graph for training a random forest.

    Args:
      input_data: A tensor or SparseTensor or placeholder for input data.
      input_labels: A tensor or placeholder for labels associated with
        input_data.
      data_spec: A list of tf.dtype values specifying the original types of
        each column.
      epoch: A tensor or placeholder for the epoch the training data comes
        from.
      **tree_kwargs: Keyword arguments passed to each tree's training_graph.

    Returns:
      The last op in the random forest training graph.
    """
    data_spec = [constants.DATA_FLOAT] if data_spec is None else data_spec
    tree_graphs = []
    for i in range(self.params.num_trees):
        with ops.device(self.device_assigner.get_device(i)):
            seed = self.params.base_random_seed
            if seed != 0:
                seed += i
            # If using bagging, randomly select some of the input.
            tree_data = input_data
            tree_labels = input_labels
            if self.params.bagging_fraction < 1.0:
                # TODO(thomaswc): This does sampling without replacement.
                # Consider also allowing sampling with replacement as an
                # option.
                batch_size = array_ops.slice(array_ops.shape(input_data),
                                             [0], [1])
                r = random_ops.random_uniform(batch_size, seed=seed)
                mask = math_ops.less(
                    r, array_ops.ones_like(r) * self.params.bagging_fraction)
                gather_indices = array_ops.squeeze(
                    array_ops.where(mask), squeeze_dims=[1])
                # TODO(thomaswc): Calculate out-of-bag data and labels, and
                # store them for use in calculating statistics later.
                tree_data = array_ops.gather(input_data, gather_indices)
                tree_labels = array_ops.gather(input_labels, gather_indices)
            if self.params.bagged_features:
                tree_data = self._bag_features(i, tree_data)

            initialization = self.trees[i].tree_initialization()

            with ops.control_dependencies([initialization]):
                tree_graphs.append(
                    self.trees[i].training_graph(
                        tree_data, tree_labels, seed,
                        data_spec=data_spec,
                        epoch=([0] if epoch is None else epoch),
                        **tree_kwargs))

    return control_flow_ops.group(*tree_graphs, name='train')
```
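The bagging mask in this graph is a per-example Bernoulli draw: each row is kept when a uniform sample falls below `bagging_fraction`. A condensed, hedged sketch of just that selection step (the data and names are illustrative):

```python
import tensorflow as tf
from tensorflow.python.ops import array_ops, math_ops, random_ops

examples = tf.constant([[1.0], [2.0], [3.0], [4.0]])
bagging_fraction = 0.5

batch_size = array_ops.shape(examples)[0:1]
r = random_ops.random_uniform(batch_size)
# Keep each row independently with probability `bagging_fraction`.
mask = math_ops.less(r, tf.ones_like(r) * bagging_fraction)
gather_indices = array_ops.squeeze(array_ops.where(mask), squeeze_dims=[1])
bagged = array_ops.gather(examples, gather_indices)

with tf.Session() as sess:
    print(sess.run(bagged))  # a random subset of the rows
```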
```python
def _conditional_batch(tensors, accept_prob, batch_size, queue_threads=10):
    """Conditionally enqueue tensors based on accept_prob.

    Specifically, enqueue the element if accept_prob > rand_unif([0, 1]).

    Args:
      tensors: List of tensors to enqueue.
      accept_prob: Acceptance probability per example.
      batch_size: Size of batch.
      queue_threads: Number of threads enqueuing in the final queue.

    Returns:
      List of batched tensors.

    Raises:
      ValueError: `accept_prob` isn't 0D.
    """
    accept_prob.get_shape().assert_has_rank(0)
    # Determine shapes and types of to-be-enqueued-tensors.
    shapes_list = []
    dtypes_list = []
    for tensor in tensors:
        cur_shape = tensor.get_shape()
        cur_shape.assert_is_fully_defined()
        shapes_list.append(cur_shape)
        dtypes_list.append(tensor.dtype)

    final_q = data_flow_ops.FIFOQueue(capacity=batch_size,
                                      shapes=shapes_list,
                                      dtypes=dtypes_list,
                                      name='batched_queue')
    logging_ops.scalar_summary('queue/%s/size' % final_q.name, final_q.size())

    # Conditionally enqueue.
    # Reshape enqueue op to match no_op's shape.
    eq_tf = math_ops.less(random_ops.random_uniform([]), accept_prob)
    conditional_enqueue = control_flow_ops.cond(
        eq_tf, lambda: final_q.enqueue(tensors), control_flow_ops.no_op)
    queue_runner.add_queue_runner(queue_runner.QueueRunner(
        final_q, [conditional_enqueue] * queue_threads))

    out_tensor = final_q.dequeue_many(batch_size)
    # Queues return a single tensor if the list of enqueued tensors is one.
    # Since we want the type to be the same in all cases, always return a list.
    if isinstance(out_tensor, ops.Tensor):
        out_tensor = [out_tensor]

    return out_tensor
```
```python
def construct_rnn(initial_state, sequence_input, cell, num_label_columns,
                  dtype=dtypes.float32, parallel_iterations=32,
                  swap_memory=False):
    """Build an RNN and apply a fully connected layer to get the desired output.

    Args:
      initial_state: The initial state to pass to the RNN. If `None`, the
        default starting state for `self._cell` is used.
      sequence_input: A `Tensor` with shape `[batch_size, padded_length, d]`
        that will be passed as input to the RNN.
      cell: An initialized `RNNCell`.
      num_label_columns: The desired output dimension.
      dtype: dtype of `cell`.
      parallel_iterations: Number of iterations to run in parallel. Values >> 1
        use more memory but take less time, while smaller values use less
        memory but computations take longer.
      swap_memory: Transparently swap the tensors produced in forward inference
        but needed for back prop from GPU to CPU. This allows training RNNs
        which would typically not fit on a single GPU, with very minimal (or
        no) performance penalty.

    Returns:
      activations: The output of the RNN, projected to `num_label_columns`
        dimensions.
      final_state: The final state output by the RNN.
    """
    with ops.name_scope('RNN'):
        rnn_outputs, final_state = rnn.dynamic_rnn(
            cell=cell,
            inputs=sequence_input,
            initial_state=initial_state,
            dtype=dtype,
            parallel_iterations=parallel_iterations,
            swap_memory=swap_memory,
            time_major=False)
        activations = layers.fully_connected(
            inputs=rnn_outputs,
            num_outputs=num_label_columns,
            activation_fn=None,
            trainable=True)
        return activations, final_state
```
```python
def training_graph(self, input_data, input_labels, data_spec=None,
                   **tree_kwargs):
    """Constructs a TF graph for training a random forest.

    Args:
      input_data: A tensor or SparseTensor or placeholder for input data.
      input_labels: A tensor or placeholder for labels associated with
        input_data.
      data_spec: A list of tf.dtype values specifying the original types of
        each column.
      **tree_kwargs: Keyword arguments passed to each tree's training_graph.

    Returns:
      The last op in the random forest training graph.
    """
    data_spec = [constants.DATA_FLOAT] if data_spec is None else data_spec
    tree_graphs = []
    for i in range(self.params.num_trees):
        with ops.device(self.device_assigner.get_device(i)):
            seed = self.params.base_random_seed
            if seed != 0:
                seed += i
            # If using bagging, randomly select some of the input.
            tree_data = input_data
            tree_labels = input_labels
            if self.params.bagging_fraction < 1.0:
                # TODO(thomaswc): This does sampling without replacement.
                # Consider also allowing sampling with replacement as an
                # option.
                batch_size = array_ops.slice(array_ops.shape(input_data),
                                             [0], [1])
                r = random_ops.random_uniform(batch_size, seed=seed)
                mask = math_ops.less(
                    r, array_ops.ones_like(r) * self.params.bagging_fraction)
                gather_indices = array_ops.squeeze(
                    array_ops.where(mask), squeeze_dims=[1])
                # TODO(thomaswc): Calculate out-of-bag data and labels, and
                # store them for use in calculating statistics later.
                tree_data = array_ops.gather(input_data, gather_indices)
                tree_labels = array_ops.gather(input_labels, gather_indices)
            if self.params.bagged_features:
                tree_data = self._bag_features(i, tree_data)

            initialization = self.trees[i].tree_initialization()

            with ops.control_dependencies([initialization]):
                tree_graphs.append(
                    self.trees[i].training_graph(
                        tree_data, tree_labels, seed,
                        data_spec=data_spec,
                        **tree_kwargs))

    return control_flow_ops.group(*tree_graphs, name='train')
```
```python
def test_keyed_features_filter(self):
    gfile.Glob = self._orig_glob
    lines = [
        '{"features": {"feature": {"age": {"int64_list": {"value": [2]}}}}}',
        '{"features": {"feature": {"age": {"int64_list": {"value": [0]}}}}}',
        '{"features": {"feature": {"age": {"int64_list": {"value": [1]}}}}}',
        '{"features": {"feature": {"age": {"int64_list": {"value": [0]}}}}}',
        '{"features": {"feature": {"age": {"int64_list": {"value": [3]}}}}}',
        '{"features": {"feature": {"age": {"int64_list": {"value": [5]}}}}}'
    ]
    filename = self._create_temp_file("\n".join(lines))
    batch_size = 2
    queue_capacity = 4
    name = "my_batch"
    features = {"age": parsing_ops.FixedLenFeature([], dtypes_lib.int64)}

    def filter_fn(keys, examples_json):
        del keys
        serialized = parsing_ops.decode_json_example(examples_json)
        examples = parsing_ops.parse_example(serialized, features)
        return math_ops.less(examples["age"], 2)

    with ops.Graph().as_default() as g, self.test_session(graph=g) as session:
        keys, inputs = graph_io._read_keyed_batch_examples_helper(
            filename,
            batch_size,
            reader=io_ops.TextLineReader,
            randomize_input=False,
            num_epochs=1,
            read_batch_size=batch_size,
            queue_capacity=queue_capacity,
            filter_fn=filter_fn,
            name=name)
        self.assertAllEqual((None,), keys.get_shape().as_list())
        self.assertAllEqual((None,), inputs.get_shape().as_list())
        session.run(variables.local_variables_initializer())

        coord = coordinator.Coordinator()
        threads = queue_runner_impl.start_queue_runners(session, coord=coord)

        # First batch of two filtered examples.
        out_keys, out_vals = session.run((keys, inputs))
        self.assertAllEqual(
            [filename.encode("utf-8") + b":2",
             filename.encode("utf-8") + b":3"],
            out_keys)
        self.assertAllEqual(
            [lines[1].encode("utf-8"), lines[2].encode("utf-8")], out_vals)

        # Second batch will only have one filtered example as that's the only
        # remaining example that satisfies the filtering criterion.
        out_keys, out_vals = session.run((keys, inputs))
        self.assertAllEqual([filename.encode("utf-8") + b":4"], out_keys)
        self.assertAllEqual([lines[3].encode("utf-8")], out_vals)

        # Exhausted input.
        with self.assertRaises(errors.OutOfRangeError):
            session.run((keys, inputs))

        coord.request_stop()
        coord.join(threads)
```
````python
def pick_vector(cond, true_vector, false_vector, name="pick_vector"):
    """Picks possibly different length row `Tensor`s based on condition.

    Value `Tensor`s should have exactly one dimension.

    If `cond` is a python Boolean or `tf.constant` then either `true_vector` or
    `false_vector` is immediately returned. I.e., no graph nodes are created
    and no validation happens.

    Args:
      cond: `Tensor`. Must have `dtype=tf.bool` and be scalar.
      true_vector: `Tensor` of one dimension. Returned when cond is `True`.
      false_vector: `Tensor` of one dimension. Returned when cond is `False`.
      name: `String`. The name to give this op.

    Example:

    ```python
    pick_vector(tf.less(0, 5), tf.range(10, 12), tf.range(15, 18))
    # result is tensor: [10, 11].
    pick_vector(tf.less(5, 0), tf.range(10, 12), tf.range(15, 18))
    # result is tensor: [15, 16, 17].
    ```

    Returns:
      true_or_false_vector: `Tensor`.

    Raises:
      TypeError: if `cond.dtype != tf.bool`
      TypeError: if `cond` is not a constant and
        `true_vector.dtype != false_vector.dtype`
    """
    with ops.name_scope(name, values=(cond, true_vector, false_vector)):
        cond = ops.convert_to_tensor(cond, name="cond")
        if cond.dtype != dtypes.bool:
            raise TypeError("%s.dtype=%s which is not %s"
                            % (cond.name, cond.dtype, dtypes.bool))
        cond_value_static = tensor_util.constant_value(cond)
        if cond_value_static is not None:
            return true_vector if cond_value_static else false_vector
        true_vector = ops.convert_to_tensor(true_vector, name="true_vector")
        false_vector = ops.convert_to_tensor(false_vector, name="false_vector")
        if true_vector.dtype != false_vector.dtype:
            raise TypeError(
                "%s.dtype=%s does not match %s.dtype=%s"
                % (true_vector.name, true_vector.dtype,
                   false_vector.name, false_vector.dtype))
        n = array_ops.shape(true_vector)[0]
        return array_ops.slice(
            array_ops.concat((true_vector, false_vector), 0),
            [array_ops.where(cond, 0, n)],
            [array_ops.where(cond, n, -1)])
````
````python
def softplus_inverse(x, name=None):
    """Computes the inverse softplus, i.e., x = softplus_inverse(softplus(x)).

    Mathematically this op is equivalent to:

    ```none
    softplus_inverse = log(exp(x) - 1.)
    ```

    Args:
      x: `Tensor`. Non-negative (not enforced), floating-point.
      name: A name for the operation (optional).

    Returns:
      `Tensor`. Has the same type/shape as input `x`.
    """
    with ops.name_scope(name, "softplus_inverse", values=[x]):
        x = ops.convert_to_tensor(x, name="x")
        # We begin by deriving a more numerically stable softplus_inverse:
        # x = softplus(y) = Log[1 + exp{y}], (which means x > 0).
        # ==> exp{x} = 1 + exp{y}                                (1)
        # ==> y = Log[exp{x} - 1]                                (2)
        #       = Log[(exp{x} - 1) / exp{x}] + Log[exp{x}]
        #       = Log[(1 - exp{-x}) / 1] + Log[exp{x}]
        #       = Log[1 - exp{-x}] + x                           (3)
        # (2) is the "obvious" inverse, but (3) is more stable than (2) for
        # large x. For small x (e.g. x = 1e-10), (3) will become -inf since
        # 1 - exp{-x} will be zero. To fix this, we use 1 - exp{-x} approx x
        # for small x > 0.
        #
        # In addition to the numerically stable derivation above, we clamp
        # small/large values to be congruent with the logic in:
        # tensorflow/core/kernels/softplus_op.h
        #
        # Finally, we set the input to one whenever the input is too large or
        # too small. This ensures that no unchosen codepath is +/- inf. This
        # is necessary to ensure the gradient doesn't get NaNs. Recall that
        # the gradient of `where` behaves like
        # `pred*pred_true + (1-pred)*pred_false` thus an `inf` in an
        # unselected path results in `0*inf=nan`. We are careful to overwrite
        # `x` with ones only when we will never actually use this value. Note
        # that we use ones and not zeros since `log(expm1(0.)) = -inf`.
        threshold = np.log(np.finfo(x.dtype.as_numpy_dtype).eps) + 2.
        is_too_small = math_ops.less(x, np.exp(threshold))
        is_too_large = math_ops.greater(x, -threshold)
        too_small_value = math_ops.log(x)
        too_large_value = x
        # This `where` will ultimately be a NOP because we won't select this
        # codepath whenever we used the surrogate `ones_like`.
        x = array_ops.where(math_ops.logical_or(is_too_small, is_too_large),
                            array_ops.ones_like(x), x)
        y = x + math_ops.log(-math_ops.expm1(-x))  # == log(expm1(x))
        return array_ops.where(is_too_small, too_small_value,
                               array_ops.where(is_too_large,
                                               too_large_value, y))
````
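As a quick numerical check of the identity in that docstring, here is a hedged sketch (TF 1.x session style) that round-trips a few values through softplus and the stable inverse formula for moderate inputs:

```python
import tensorflow as tf

# Round-trip check: softplus_inverse(softplus(x)) should recover x.
# For moderate x we can inline the stable form y = z + log(-expm1(-z)).
x = tf.constant([0.5, 1.0, 5.0])
sp = tf.nn.softplus(x)                   # log(1 + exp(x))
recovered = sp + tf.log(-tf.expm1(-sp))  # == log(expm1(sp)) == x

with tf.Session() as sess:
    print(sess.run(recovered))  # approx [0.5, 1.0, 5.0]
```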