The following 50 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.greater().
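As a quick orientation before the project examples, here is a minimal sketch (not taken from any of the projects below, and assuming the TensorFlow 1.x graph/session API used throughout them) of the basic behaviour of tf.greater: it compares two tensors element-wise with NumPy-style broadcasting and returns a bool tensor, which is then typically cast or used as a mask.

import tensorflow as tf

a = tf.constant([1, 5, 3, 0])
b = tf.constant([2, 2, 3, -1])

# Element-wise comparison; the result is a bool tensor of the same shape.
mask = tf.greater(a, b)                         # [False, True, False, True]

# Two common follow-up patterns seen in the examples below:
count = tf.reduce_sum(tf.cast(mask, tf.int32))  # count elements where a > b
kept = tf.boolean_mask(a, mask)                 # keep only elements where a > b

with tf.Session() as sess:
    print(sess.run(mask))   # [False  True False  True]
    print(sess.run(count))  # 2
    print(sess.run(kept))   # [5 0]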
def bin_stats(predictions: tf.Tensor, labels: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]:
    """
    Calculate f1, precision and recall from binary classification expected and predicted values.

    :param predictions: 2-d tensor (batch, predictions) of predicted 0/1 classes
    :param labels: 2-d tensor (batch, labels) of expected 0/1 classes
    :return: a tuple of batched (f1, precision and recall) values
    """
    predictions = tf.cast(predictions, tf.int32)
    labels = tf.cast(labels, tf.int32)

    true_positives = tf.reduce_sum((predictions * labels), axis=1)
    false_positives = tf.reduce_sum(tf.cast(tf.greater(predictions, labels), tf.int32), axis=1)
    false_negatives = tf.reduce_sum(tf.cast(tf.greater(labels, predictions), tf.int32), axis=1)

    recall = true_positives / (true_positives + false_negatives)
    precision = true_positives / (true_positives + false_positives)
    f1_score = 2 / (1 / precision + 1 / recall)

    return f1_score, precision, recall

def tabular_kl(p, q, zero_prob_value=0., logarg_clip=None):
    """Computes KL-divergence KL(p||q) for two probability mass functions (pmf) given in a tabular form.

    :param p: iterable
    :param q: iterable
    :param zero_prob_value: float; values below this threshold are treated as zero
    :param logarg_clip: float or None, clips the argument to the log to lie in [-logarg_clip, logarg_clip] if not None
    :return: iterable of broadcasted shape of (p * q), per-coordinate value of KL(p||q)
    """
    p, q = (tf.cast(i, tf.float64) for i in (p, q))
    non_zero = tf.greater(p, zero_prob_value)
    logarg = p / q

    if logarg_clip is not None:
        logarg = clip_preserve(logarg, 1. / logarg_clip, logarg_clip)

    log = masked_apply(logarg, tf.log, non_zero)
    kl = p * log

    return tf.cast(kl, tf.float32)

def cal_loss(self):
    one_hot_labels = tf.one_hot(
        self.labels, depth=self.conf.class_num,
        axis=self.channel_axis, name='labels/one_hot')
    losses = tf.losses.softmax_cross_entropy(
        one_hot_labels, self.predictions, scope='loss/losses')
    self.loss_op = tf.reduce_mean(losses, name='loss/loss_op')
    self.decoded_preds = tf.argmax(
        self.predictions, self.channel_axis, name='accuracy/decode_pred')
    correct_prediction = tf.equal(
        self.labels, self.decoded_preds, name='accuracy/correct_pred')
    self.accuracy_op = tf.reduce_mean(
        tf.cast(correct_prediction, tf.float32, name='accuracy/cast'),
        name='accuracy/accuracy_op')
    # weights = tf.cast(
    #     tf.greater(self.decoded_preds, 0, name='m_iou/greater'),
    #     tf.int32, name='m_iou/weights')
    weights = tf.cast(
        tf.less(self.labels, self.conf.channel, name='m_iou/greater'),
        tf.int64, name='m_iou/weights')
    labels = tf.multiply(self.labels, weights, name='m_iou/mul')
    self.m_iou, self.miou_op = tf.metrics.mean_iou(
        self.labels, self.decoded_preds, self.conf.class_num,
        weights, name='m_iou/m_ious')

def bboxes_filter_center(labels, bboxes, margins=[0., 0., 0., 0.],
                         scope=None):
    """Filter out bounding boxes whose center are not in
    the rectangle [0, 0, 1, 1] + margins. The margin Tensor
    can be used to enforce or loosen this condition.

    Return:
      labels, bboxes: Filtered elements.
    """
    with tf.name_scope(scope, 'bboxes_filter', [labels, bboxes]):
        cy = (bboxes[:, 0] + bboxes[:, 2]) / 2.
        cx = (bboxes[:, 1] + bboxes[:, 3]) / 2.
        mask = tf.greater(cy, margins[0])
        mask = tf.logical_and(mask, tf.greater(cx, margins[1]))
        mask = tf.logical_and(mask, tf.less(cy, 1. + margins[2]))
        mask = tf.logical_and(mask, tf.less(cx, 1. + margins[3]))
        # Boolean masking...
        labels = tf.boolean_mask(labels, mask)
        bboxes = tf.boolean_mask(bboxes, mask)
        return labels, bboxes

def safe_div(numerator, denominator, name='safe_div'):
    """Divides two values, returning 0 if the denominator is <= 0.

    Args:
      numerator: A real `Tensor`.
      denominator: A real `Tensor`, with dtype matching `numerator`.
      name: Name for the returned op.

    Returns:
      0 if `denominator` <= 0, else `numerator` / `denominator`
    """
    return tf.where(
        tf.greater(denominator, 0),
        tf.truediv(numerator, denominator),
        0,
        name=name)

def insert(self, ids, scores):
    """Insert the ids and scores into the TopN."""
    with tf.control_dependencies(self.last_ops):
        scatter_op = tf.scatter_update(self.id_to_score, ids, scores)
        larger_scores = tf.greater(scores, self.sl_scores[0])

        def shortlist_insert():
            larger_ids = tf.boolean_mask(tf.to_int64(ids), larger_scores)
            larger_score_values = tf.boolean_mask(scores, larger_scores)
            shortlist_ids, new_ids, new_scores = self.ops.top_n_insert(
                self.sl_ids, self.sl_scores, larger_ids, larger_score_values)
            u1 = tf.scatter_update(self.sl_ids, shortlist_ids, new_ids)
            u2 = tf.scatter_update(self.sl_scores, shortlist_ids, new_scores)
            return tf.group(u1, u2)

        # We only need to insert into the shortlist if there are any
        # scores larger than the threshold.
        cond_op = tf.cond(
            tf.reduce_any(larger_scores), shortlist_insert, tf.no_op)
        with tf.control_dependencies([cond_op]):
            self.last_ops = [scatter_op, cond_op]

def build_training_process(self):
    wider_side_obj, wider_entropy = tf.cond(
        tf.greater(self.wider_seg_deeper, 0),
        lambda: self.get_wider_side_obj(),
        lambda: (tf.constant(0.0, dtype=tf.float32), tf.constant(0.0, dtype=tf.float32))
    )
    batch_size = array_ops.shape(self.reward)[0]
    deeper_side_obj, deeper_entropy = tf.cond(
        self.has_deeper,
        lambda: self.get_deeper_side_obj(),
        lambda: (tf.constant(0.0, dtype=tf.float32), tf.constant(0.0, dtype=tf.float32))
    )
    self.obj = wider_side_obj + deeper_side_obj
    entropy_term = wider_entropy * tf.cast(self.wider_seg_deeper, tf.float32) + \
        deeper_entropy * tf.cast(batch_size - self.wider_seg_deeper, tf.float32)
    entropy_term /= tf.cast(batch_size, tf.float32)

    optimizer = BasicModel.build_optimizer(self.learning_rate, self.opt_config[0], self.opt_config[1])
    self.train_step = optimizer.minimize(- self.obj - self.entropy_penalty * entropy_term)

def get_mu_tensor(self):
    const_fact = self._dist_to_opt_avg**2 * self._h_min**2 / 2 / self._grad_var
    coef = tf.Variable([-1.0, 3.0, 0.0, 1.0], dtype=tf.float32, name="cubic_solver_coef")
    coef = tf.scatter_update(coef, tf.constant(2), -(3 + const_fact))
    roots = tf.py_func(np.roots, [coef], Tout=tf.complex64, stateful=False)

    # filter out the correct root
    root_idx = tf.logical_and(
        tf.logical_and(tf.greater(tf.real(roots), tf.constant(0.0)),
                       tf.less(tf.real(roots), tf.constant(1.0))),
        tf.less(tf.abs(tf.imag(roots)), 1e-5))
    # in case there are two duplicated roots satisfying the above condition
    root = tf.reshape(tf.gather(tf.gather(roots, tf.where(root_idx)), tf.constant(0)), shape=[])
    tf.assert_equal(tf.size(root), tf.constant(1))

    dr = self._h_max / self._h_min
    mu = tf.maximum(tf.real(root)**2, ((tf.sqrt(dr) - 1) / (tf.sqrt(dr) + 1))**2)
    return mu

def find_dup(a):
    """ Find the duplicated elements in 1-D a tensor.
    Args:
        a: 1-D tensor.

    Return:
        more_than_one_vals: duplicated value in a.
        indexes_in_a: duplicated value's index in a.
        dups_in_a: duplicated value with duplicate in a.
    """
    unique_a_vals, unique_idx = tf.unique(a)
    count_a_unique = tf.unsorted_segment_sum(tf.ones_like(a),
                                             unique_idx,
                                             tf.shape(a)[0])

    more_than_one = tf.greater(count_a_unique, 1)
    more_than_one_idx = tf.squeeze(tf.where(more_than_one))
    more_than_one_vals = tf.squeeze(tf.gather(unique_a_vals, more_than_one_idx))

    not_duplicated, _ = tf.setdiff1d(a, more_than_one_vals)
    dups_in_a, indexes_in_a = tf.setdiff1d(a, not_duplicated)

    return more_than_one_vals, indexes_in_a, dups_in_a

def retrieve_seq_length_op2(data):
    """An op to compute the length of a sequence, from input shape of [batch_size, n_step(max)],
    it can be used when the features of padding (on right hand side) are all zeros.

    Parameters
    -----------
    data : tensor
        [batch_size, n_step(max)] with zero padding on right hand side.

    Examples
    --------
    >>> data = [[1,2,0,0,0],
    ...         [1,2,3,0,0],
    ...         [1,2,6,1,0]]
    >>> o = retrieve_seq_length_op2(data)
    >>> sess = tf.InteractiveSession()
    >>> tl.layers.initialize_global_variables(sess)
    >>> print(o.eval())
    ... [2 3 4]
    """
    return tf.reduce_sum(tf.cast(tf.greater(data, tf.zeros_like(data)), tf.int32), 1)

def mle_loss(self, outputs, targets):
    '''Maximum likelihood estimation loss.'''
    present_mask = tf.greater(targets, 0, name='present_mask')
    # don't enforce loss on true <unk>'s
    unk_mask = tf.not_equal(targets, self.vocab.unk_index, name='unk_mask')
    mask = tf.cast(tf.logical_and(present_mask, unk_mask), tf.float32)
    output = tf.reshape(tf.concat(1, outputs), [-1, cfg.hidden_size])
    if self.training and cfg.softmax_samples < len(self.vocab.vocab):
        targets = tf.reshape(targets, [-1, 1])
        mask = tf.reshape(mask, [-1])
        loss = tf.nn.sampled_softmax_loss(self.softmax_w, self.softmax_b, output, targets,
                                          cfg.softmax_samples, len(self.vocab.vocab))
        loss *= mask
    else:
        logits = tf.nn.bias_add(tf.matmul(output, tf.transpose(self.softmax_w),
                                          name='softmax_transform_mle'), self.softmax_b)
        loss = tf.nn.seq2seq.sequence_loss_by_example([logits],
                                                      [tf.reshape(targets, [-1])],
                                                      [tf.reshape(mask, [-1])])
    return tf.reshape(loss, [cfg.batch_size, -1])

def atan2(x, y, epsilon=1.0e-12):
    """
    A hack until the TensorFlow developers implement a function that can find the angle
    from an x and y coordinate.
    :param x:
    :param epsilon:
    :return:
    """
    # Add a small number to all zeros, to avoid division by zero:
    x = tf.where(tf.equal(x, 0.0), x + epsilon, x)
    y = tf.where(tf.equal(y, 0.0), y + epsilon, y)

    angle = tf.where(tf.greater(x, 0.0), tf.atan(y / x), tf.zeros_like(x))
    angle = tf.where(tf.logical_and(tf.less(x, 0.0), tf.greater_equal(y, 0.0)),
                     tf.atan(y / x) + np.pi, angle)
    angle = tf.where(tf.logical_and(tf.less(x, 0.0), tf.less(y, 0.0)),
                     tf.atan(y / x) - np.pi, angle)
    angle = tf.where(tf.logical_and(tf.equal(x, 0.0), tf.greater(y, 0.0)),
                     0.5 * np.pi * tf.ones_like(x), angle)
    angle = tf.where(tf.logical_and(tf.equal(x, 0.0), tf.less(y, 0.0)),
                     -0.5 * np.pi * tf.ones_like(x), angle)
    angle = tf.where(tf.logical_and(tf.equal(x, 0.0), tf.equal(y, 0.0)),
                     tf.zeros_like(x), angle)
    return angle


# List of faces for consistent ordering.

def get_probs_and_accuracy(preds, O):
    """
    helper function. we have a prediction for each MC sample of each observation
    in this batch. need to distill the multiple preds from each MC into a single
    pred for this observation. also get accuracy. use true probs to get ROC, PR curves in sklearn
    """
    all_probs = tf.exp(preds[:, 1] - tf.reduce_logsumexp(preds, axis=1))  # normalize; and drop a dim so only prob of positive case
    N = tf.cast(tf.shape(preds)[0] / n_mc_smps, tf.int32)  # actual number of observations in preds, collapsing MC samples

    # predicted probability per observation; collapse the MC samples
    probs = tf.zeros([0])  # store all samples in a list, then concat into tensor at end

    # setup tf while loop (have to use this bc loop size is variable)
    def cond(i, probs):
        return i < N

    def body(i, probs):
        probs = tf.concat([probs, [tf.reduce_mean(tf.slice(all_probs, [i * n_mc_smps], [n_mc_smps]))]], 0)
        return i + 1, probs

    i = tf.constant(0)
    i, probs = tf.while_loop(cond, body, loop_vars=[i, probs],
                             shape_invariants=[i.get_shape(), tf.TensorShape([None])])

    # compare to truth; just use cutoff of 0.5 for right now to get accuracy
    correct_pred = tf.equal(tf.cast(tf.greater(probs, 0.5), tf.int32), O)
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
    return probs, accuracy

def retrieve_seq_length_op2(data):
    """An op to compute the length of a sequence, from input shape of [batch_size, n_step(max)],
    it can be used when the features of padding (on right hand side) are all zeros.

    Parameters
    -----------
    data : tensor
        [batch_size, n_step(max)] with zero padding on right hand side.

    Examples
    --------
    >>> data = [[1,2,0,0,0],
    ...         [1,2,3,0,0],
    ...         [1,2,6,1,0]]
    >>> o = retrieve_seq_length_op2(data)
    >>> sess = tf.InteractiveSession()
    >>> tl.layers.initialize_global_variables(sess)
    >>> print(o.eval())
    ... [2 3 4]
    """
    return tf.reduce_sum(tf.cast(tf.greater(data, tf.zeros_like(data)), tf.int32), 1)


# Dynamic RNN

def _apply_prune_on_grads(self, grads_and_vars: list, threshold: float):
    # we need to make gradients correspondent
    # to the pruned weights to be zero
    grads_and_vars_sparse = []
    for grad, var in grads_and_vars:
        if 'weights' in var.name:
            small_weights = tf.greater(threshold, tf.abs(var))
            mask = tf.cast(tf.logical_not(small_weights), tf.float32)
            grad = grad * mask
        grads_and_vars_sparse.append((grad, var))
    return grads_and_vars_sparse

def test_gather_with_dynamic_indexing(self):
    corners = tf.constant([4 * [0.0], 4 * [1.0], 4 * [2.0], 4 * [3.0], 4 * [4.0]])
    weights = tf.constant([.5, .3, .7, .1, .9], tf.float32)
    indices = tf.reshape(tf.where(tf.greater(weights, 0.4)), [-1])
    expected_subset = [4 * [0.0], 4 * [2.0], 4 * [4.0]]
    expected_weights = [.5, .7, .9]

    boxes = box_list.BoxList(corners)
    boxes.add_field('weights', weights)
    subset = box_list_ops.gather(boxes, indices, ['weights'])
    with self.test_session() as sess:
        subset_output, weights_output = sess.run(
            [subset.get(), subset.get_field('weights')])
        self.assertAllClose(subset_output, expected_subset)
        self.assertAllClose(weights_output, expected_weights)

def test_visualize_boxes_in_image(self):
    image = tf.zeros((6, 4, 3))
    corners = tf.constant([[0, 0, 5, 3],
                           [0, 0, 3, 2]], tf.float32)
    boxes = box_list.BoxList(corners)
    image_and_boxes = box_list_ops.visualize_boxes_in_image(image, boxes)
    image_and_boxes_bw = tf.to_float(
        tf.greater(tf.reduce_sum(image_and_boxes, 2), 0.0))
    exp_result = [[1, 1, 1, 0],
                  [1, 1, 1, 0],
                  [1, 1, 1, 0],
                  [1, 0, 1, 0],
                  [1, 1, 1, 0],
                  [0, 0, 0, 0]]
    with self.test_session() as sess:
        output = sess.run(image_and_boxes_bw)
        self.assertAllEqual(output.astype(int), exp_result)

def _padded_batched_proposals_indicator(self,
                                        num_proposals,
                                        max_num_proposals):
    """Creates indicator matrix of non-pad elements of padded batch proposals.

    Args:
      num_proposals: Tensor of type tf.int32 with shape [batch_size].
      max_num_proposals: Maximum number of proposals per image (integer).

    Returns:
      A Tensor of type tf.bool with shape [batch_size, max_num_proposals].
    """
    batch_size = tf.size(num_proposals)
    tiled_num_proposals = tf.tile(
        tf.expand_dims(num_proposals, 1), [1, max_num_proposals])
    tiled_proposal_index = tf.tile(
        tf.expand_dims(tf.range(max_num_proposals), 0), [batch_size, 1])
    return tf.greater(tiled_num_proposals, tiled_proposal_index)

def filter_groundtruth_with_nan_box_coordinates(tensor_dict):
    """Filters out groundtruth with no bounding boxes.

    Args:
      tensor_dict: a dictionary of following groundtruth tensors -
        fields.InputDataFields.groundtruth_boxes
        fields.InputDataFields.groundtruth_classes
        fields.InputDataFields.groundtruth_is_crowd
        fields.InputDataFields.groundtruth_area
        fields.InputDataFields.groundtruth_label_types

    Returns:
      a dictionary of tensors containing only the groundtruth that have
      bounding boxes.
    """
    groundtruth_boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
    nan_indicator_vector = tf.greater(tf.reduce_sum(tf.to_int32(
        tf.is_nan(groundtruth_boxes)), reduction_indices=[1]), 0)
    valid_indicator_vector = tf.logical_not(nan_indicator_vector)
    valid_indices = tf.where(valid_indicator_vector)

    return retain_groundtruth(tensor_dict, valid_indices)

def pad_tensor(t, length):
    """Pads the input tensor with 0s along the first dimension up to the length.

    Args:
      t: the input tensor, assuming the rank is at least 1.
      length: a tensor of shape [1] or an integer, indicating the first dimension
        of the input tensor t after padding, assuming length <= t.shape[0].

    Returns:
      padded_t: the padded tensor, whose first dimension is length. If the length
        is an integer, the first dimension of padded_t is set to length
        statically.
    """
    t_rank = tf.rank(t)
    t_shape = tf.shape(t)
    t_d0 = t_shape[0]
    pad_d0 = tf.expand_dims(length - t_d0, 0)
    pad_shape = tf.cond(
        tf.greater(t_rank, 1),
        lambda: tf.concat([pad_d0, t_shape[1:]], 0),
        lambda: tf.expand_dims(length - t_d0, 0))
    padded_t = tf.concat([t, tf.zeros(pad_shape, dtype=t.dtype)], 0)
    if not _is_tensor(length):
        padded_t = _set_dim_0(padded_t, length)
    return padded_t

def pad_or_clip_tensor(t, length):
    """Pad or clip the input tensor along the first dimension.

    Args:
      t: the input tensor, assuming the rank is at least 1.
      length: a tensor of shape [1] or an integer, indicating the first dimension
        of the input tensor t after processing.

    Returns:
      processed_t: the processed tensor, whose first dimension is length. If the
        length is an integer, the first dimension of the processed tensor is set
        to length statically.
    """
    processed_t = tf.cond(
        tf.greater(tf.shape(t)[0], length),
        lambda: clip_tensor(t, length),
        lambda: pad_tensor(t, length))
    if not _is_tensor(length):
        processed_t = _set_dim_0(processed_t, length)
    return processed_t

def separation_loss(tf_prediction_serial, tf_interactions_serial, **kwargs):
    """
    This loss function models the explicit positive and negative interaction predictions as normal distributions and
    returns the probability of overlap between the two distributions.
    :param tf_prediction_serial:
    :param tf_interactions_serial:
    :return:
    """
    tf_positive_mask = tf.greater(tf_interactions_serial, 0.0)
    tf_negative_mask = tf.less_equal(tf_interactions_serial, 0.0)

    tf_positive_predictions = tf.boolean_mask(tf_prediction_serial, tf_positive_mask)
    tf_negative_predictions = tf.boolean_mask(tf_prediction_serial, tf_negative_mask)

    tf_pos_mean, tf_pos_var = tf.nn.moments(tf_positive_predictions, axes=[0])
    tf_neg_mean, tf_neg_var = tf.nn.moments(tf_negative_predictions, axes=[0])

    tf_overlap_distribution = tf.contrib.distributions.Normal(loc=(tf_neg_mean - tf_pos_mean),
                                                              scale=tf.sqrt(tf_neg_var + tf_pos_var))

    loss = 1.0 - tf_overlap_distribution.cdf(0.0)
    return loss

def create_model(self, model_input, vocab_size, num_frames, **unused_params):
    shape = model_input.get_shape().as_list()
    frames_sum = tf.reduce_sum(tf.abs(model_input), axis=2)
    frames_true = tf.ones(tf.shape(frames_sum))
    frames_false = tf.zeros(tf.shape(frames_sum))
    frames_bool = tf.reshape(tf.where(tf.greater(frames_sum, frames_false), frames_true, frames_false),
                             [-1, shape[1], 1])

    activation_1 = tf.reduce_max(model_input, axis=1)
    activation_2 = tf.reduce_sum(model_input * frames_bool, axis=1) / (tf.reduce_sum(frames_bool, axis=1) + 1e-6)
    activation_3 = tf.reduce_min(model_input, axis=1)

    model_input_1, final_probilities_1 = self.sub_moe(activation_1, vocab_size, scopename="_max")
    model_input_2, final_probilities_2 = self.sub_moe(activation_2, vocab_size, scopename="_mean")
    model_input_3, final_probilities_3 = self.sub_moe(activation_3, vocab_size, scopename="_min")

    final_probilities = tf.stack((final_probilities_1, final_probilities_2, final_probilities_3), axis=1)
    weight2d = tf.get_variable("ensemble_weight2d",
                               shape=[shape[2], 3, vocab_size],
                               regularizer=slim.l2_regularizer(1.0e-8))
    activations = tf.stack((model_input_1, model_input_2, model_input_3), axis=2)
    weight = tf.nn.softmax(tf.einsum("aij,ijk->ajk", activations, weight2d), dim=1)

    result = {}
    result["prediction_frames"] = tf.reshape(final_probilities, [-1, vocab_size])
    result["predictions"] = tf.reduce_sum(final_probilities * weight, axis=1)
    return result

def __call__(self, inputs, state, scope=None):
    with tf.variable_scope(scope or type(self).__name__, initializer=self._initializer):
        U = tf.get_variable('U', [self._num_units_per_block, self._num_units_per_block],
                            initializer=self._recurrent_initializer)
        V = tf.get_variable('V', [self._num_units_per_block, self._num_units_per_block],
                            initializer=self._recurrent_initializer)
        W = tf.get_variable('W', [self._num_units_per_block, self._num_units_per_block],
                            initializer=self._recurrent_initializer)
        U_bias = tf.get_variable('U_bias', [self._num_units_per_block])

        # Split the hidden state into blocks (each U, V, W are shared across blocks).
        state = tf.split(state, self._num_blocks, axis=1)

        next_states = []
        for j, state_j in enumerate(state):  # Hidden State (j)
            key_j = tf.expand_dims(self._keys[j], axis=0)
            gate_j = self.get_gate(state_j, key_j, inputs)
            candidate_j = self.get_candidate(state_j, key_j, inputs, U, V, W, U_bias)

            # Equation 4: h_j <- h_j + g_j * h_j^~
            # Perform an update of the hidden state (memory).
            state_j_next = state_j + tf.expand_dims(gate_j, -1) * candidate_j

            # Equation 5: h_j <- h_j / \norm{h_j}
            # Forget previous memories by normalization.
            state_j_next_norm = tf.norm(
                tensor=state_j_next,
                ord='euclidean',
                axis=-1,
                keep_dims=True)
            state_j_next_norm = tf.where(
                tf.greater(state_j_next_norm, 0.0),
                state_j_next_norm,
                tf.ones_like(state_j_next_norm))
            state_j_next = state_j_next / state_j_next_norm

            next_states.append(state_j_next)
        state_next = tf.concat(next_states, axis=1)
        return state_next, state_next

def greater(x, y):
    '''Element-wise truth value of (x > y).
    Returns a bool tensor.
    '''
    return tf.greater(x, y)

def nms(self, localization, confidence, tiling):
    good_bboxes = decode_bboxes(localization, tiling)

    not_crap_mask = tf.reduce_max(confidence[:, 1:], axis=-1) >= args.conf_thresh
    good_bboxes = tf.boolean_mask(good_bboxes, not_crap_mask)
    confidence = tf.boolean_mask(confidence, not_crap_mask)

    self.detection_list = []
    self.score_list = []
    for i in range(1, self.loader.num_classes):
        class_mask = tf.greater(confidence[:, i], args.conf_thresh)
        class_scores = tf.boolean_mask(confidence[:, i], class_mask)
        class_bboxes = tf.boolean_mask(good_bboxes, class_mask)

        K = tf.minimum(tf.size(class_scores), args.top_k_nms)
        _, top_k_inds = tf.nn.top_k(class_scores, K)
        top_class_scores = tf.gather(class_scores, top_k_inds)
        top_class_bboxes = tf.gather(class_bboxes, top_k_inds)

        final_inds = tf.image.non_max_suppression(top_class_bboxes,
                                                  top_class_scores,
                                                  max_output_size=args.top_k_after_nms,
                                                  iou_threshold=args.nms_thresh)
        final_class_bboxes = tf.gather(top_class_bboxes, final_inds)
        final_scores = tf.gather(top_class_scores, final_inds)

        self.detection_list.append(final_class_bboxes)
        self.score_list.append(final_scores)

def binary(config, gan, net):
    net = tf.greater(net, 0)
    net = tf.cast(net, tf.float32)
    return net

def detectMinVal(input_mat, var, threshold=1e-6, name='', debug=False):
    eigen_min = tf.reduce_min(input_mat)
    eigen_max = tf.reduce_max(input_mat)
    eigen_ratio = eigen_max / eigen_min
    input_mat_clipped = clipoutNeg(input_mat, threshold)

    if debug:
        input_mat_clipped = tf.cond(
            tf.logical_or(tf.greater(eigen_ratio, 0.), tf.less(eigen_ratio, -500)),
            lambda: input_mat_clipped,
            lambda: tf.Print(input_mat_clipped,
                             [tf.convert_to_tensor('screwed ratio ' + name + ' eigen values!!!'),
                              tf.convert_to_tensor(var.name), eigen_min, eigen_max, eigen_ratio]))

    return input_mat_clipped

def f1_score(precision, recall):
    """Creates an op for calculating the F1 score.

    Args:
      precision: A tensor representing precision.
      recall: A tensor representing recall.

    Returns:
      A tensor with the result of the F1 calculation.
    """
    return tf.where(
        tf.greater(precision + recall, 0),
        2 * ((precision * recall) / (precision + recall)),
        0)

def accuracy_without_true_negatives(true_positives, false_positives,
                                    false_negatives):
    """Creates an op for calculating accuracy without true negatives.

    Args:
      true_positives: A tensor representing true_positives.
      false_positives: A tensor representing false_positives.
      false_negatives: A tensor representing false_negatives.

    Returns:
      A tensor with the result of the calculation.
    """
    return tf.where(
        tf.greater(true_positives + false_positives + false_negatives, 0),
        true_positives / (true_positives + false_positives + false_negatives),
        0)

def _frame_metrics(frame_labels, frame_predictions):
    """Calculate frame-based metrics."""
    frame_labels_bool = tf.cast(frame_labels, tf.bool)
    frame_predictions_bool = tf.cast(frame_predictions, tf.bool)

    frame_true_positives = tf.reduce_sum(tf.to_float(tf.logical_and(
        tf.equal(frame_labels_bool, True),
        tf.equal(frame_predictions_bool, True))))
    frame_false_positives = tf.reduce_sum(tf.to_float(tf.logical_and(
        tf.equal(frame_labels_bool, False),
        tf.equal(frame_predictions_bool, True))))
    frame_false_negatives = tf.reduce_sum(tf.to_float(tf.logical_and(
        tf.equal(frame_labels_bool, True),
        tf.equal(frame_predictions_bool, False))))
    frame_accuracy = tf.reduce_sum(tf.to_float(
        tf.equal(frame_labels_bool, frame_predictions_bool)))

    frame_precision = tf.where(
        tf.greater(frame_true_positives + frame_false_positives, 0),
        tf.div(frame_true_positives,
               frame_true_positives + frame_false_positives),
        0)
    frame_recall = tf.where(
        tf.greater(frame_true_positives + frame_false_negatives, 0),
        tf.div(frame_true_positives,
               frame_true_positives + frame_false_negatives),
        0)
    frame_f1_score = f1_score(frame_precision, frame_recall)
    frame_accuracy_without_true_negatives = accuracy_without_true_negatives(
        frame_true_positives, frame_false_positives, frame_false_negatives)

    return {
        'true_positives': frame_true_positives,
        'false_positives': frame_false_positives,
        'false_negatives': frame_false_negatives,
        'accuracy': frame_accuracy,
        'accuracy_without_true_negatives': frame_accuracy_without_true_negatives,
        'precision': frame_precision,
        'recall': frame_recall,
        'f1_score': frame_f1_score,
    }

def calc_errors(self, output):
    Z = output['numbers']
    N = tf.reduce_sum(tf.cast(tf.greater(Z, 0), tf.float32), 1)

    tgt = output[self.target]
    pred = output[self.prediction]
    if self.idx is not None:
        tgt = tgt[:, self.idx]
        pred = pred[:, self.idx]
    return tf.abs(tgt - pred) / N

def calc_errors(self, output):
    Z = output['numbers']
    N = tf.reduce_sum(tf.cast(tf.greater(Z, 0), tf.float32), 1)

    tgt = output[self.target]
    pred = output[self.prediction]
    if self.idx is not None:
        tgt = tgt[:, self.idx]
        pred = pred[:, self.idx]
    return ((tgt - pred) / N) ** 2

def __gt__(self, other):
    return tf.greater(self, other)

def _embed_sequence_with_length(self, embeddings, input_text):
    # calculate max length of the input_text
    mask_words = tf.greater(input_text, 0)  # true for words, false for padding
    words_length = tf.reduce_sum(tf.cast(mask_words, tf.int32), -1)
    mask_sentences = tf.greater(words_length, 0)
    sentences_length = tf.reduce_sum(tf.cast(mask_sentences, tf.int32), 1)
    input_text = tf.add(input_text, 1)
    embedded_sequence = tf.nn.embedding_lookup(embeddings, input_text)
    return embedded_sequence, sentences_length, words_length

def __call__(self, inputs, state, scope=None):
    with tf.variable_scope(scope or type(self).__name__, initializer=self._initializer):
        # Split the hidden state into blocks (each U, V, W are shared across blocks).

        U = tf.get_variable('U', [self._num_units_per_block, self._num_units_per_block])
        V = tf.get_variable('V', [self._num_units_per_block, self._num_units_per_block])
        W = tf.get_variable('W', [self._num_units_per_block, self._num_units_per_block])

        b = tf.get_variable('biasU', [self._num_units_per_block])

        state = tf.split(state, self._num_blocks, 1)
        next_states = []
        for j, state_j in enumerate(state):  # Hidden State (j)
            key_j = self._keys[j]
            gate_j = self.get_gate(state_j, key_j, inputs)
            candidate_j = self.get_candidate(state_j, key_j, inputs, U, V, W, b)

            # Equation 4: h_j <- h_j + g_j * h_j^~
            # Perform an update of the hidden state (memory).
            state_j_next = state_j + tf.expand_dims(gate_j, -1) * candidate_j

            # Forget previous memories by normalization.
            state_j_next = tf.nn.l2_normalize(state_j_next, -1)  # TODO: Is epsilon necessary?

            # Equation 5: h_j <- h_j / \norm{h_j}
            # Forget previous memories by normalization.
            # state_j_next_norm = tf.norm(tensor=state_j_next,
            #                             ord='euclidean',
            #                             axis=-1,
            #                             keep_dims=True)
            # state_j_next_norm = tf.where(
            #     tf.greater(state_j_next_norm, 0.0),
            #     state_j_next_norm,
            #     tf.ones_like(state_j_next_norm))
            # state_j_next = state_j_next / state_j_next_norm

            next_states.append(state_j_next)
        state_next = tf.concat(next_states, 1)
        return state_next, state_next

def __call__(self, inputs, state, scope=None):
    with tf.variable_scope(scope or type(self).__name__, initializer=self._initializer):
        # Split the hidden state into blocks (each U, V, W are shared across blocks).

        U = tf.get_variable('U', [self._num_units_per_block, self._num_units_per_block])
        V = tf.get_variable('V', [self._num_units_per_block, self._num_units_per_block])
        W = tf.get_variable('W', [self._num_units_per_block, self._num_units_per_block])

        b = tf.get_variable('biasU', [self._num_units_per_block])

        state = tf.split(state, self._num_blocks, 1)
        next_states = []
        for j, state_j in enumerate(state):  # Hidden State (j)
            key_j = self._keys[j]
            gate_j = self.get_gate(state_j, key_j, inputs)
            candidate_j = self.get_candidate(state_j, key_j, inputs, U, V, W, b)

            # Equation 4: h_j <- h_j + g_j * h_j^~
            # Perform an update of the hidden state (memory).
            state_j_next = state_j + tf.expand_dims(gate_j, -1) * candidate_j

            # Forget previous memories by normalization.
            # Equation 5: h_j <- h_j / \norm{h_j}
            state_j_next = tf.nn.l2_normalize(state_j_next, -1)  # TODO: Is epsilon necessary?

            # Forget previous memories by normalization.
            # state_j_next_norm = tf.norm(tensor=state_j_next,
            #                             ord='euclidean',
            #                             axis=-1,
            #                             keep_dims=True)
            # state_j_next_norm = tf.where(
            #     tf.greater(state_j_next_norm, 0.0),
            #     state_j_next_norm,
            #     tf.ones_like(state_j_next_norm))
            # state_j_next = state_j_next / state_j_next_norm

            next_states.append(state_j_next)
        state_next = tf.concat(next_states, 1)
        return state_next, state_next

def get_eval_ops(logits, labels, one_hot=False, scope='', calc_accuracy=True):
    """Evaluate the quality of the logits at predicting the label.
    Args:
      logits: Logits tensor, float - [batch_size, NUM_CLASSES].
      labels: Labels tensor, int32 - [batch_size], with values in the
        range [0, NUM_CLASSES).
    Returns:
      A scalar int32 tensor with the number of examples (out of batch_size)
      that were predicted correctly.
    """
    print('Evaluation Ops..')
    with tf.name_scope(scope):
        # For a classifier model, we can use the in_top_k Op.
        # It returns a bool tensor with shape [batch_size] that is true for
        # the examples where the label is in the top k (here k=1)
        # of all logits for that example.
        # labels = tf.cast(labels, tf.int64)
        if one_hot:
            labels = tf.argmax(labels, 1)

        top_1_op = tf.nn.in_top_k(logits, labels, 1)
        num_correct = tf.reduce_sum(tf.cast(top_1_op, tf.float32))

        if calc_accuracy:
            acc_percent = tf.divide(num_correct, labels.shape[0].value)
        else:
            acc_percent = tf.constant(0.0)

        # =============
        y_const = tf.constant(-1, dtype=labels.dtype)
        y_greater = tf.greater(labels, y_const)
        n_all = tf.reduce_sum(tf.cast(y_greater, tf.float32))

        return top_1_op, acc_percent * 100.0, num_correct, n_all, labels


########################################################################

def huber_loss(x, delta=1):
    coef = 0.5
    l2_mask = tf.less_equal(tf.abs(x), delta)
    l1_mask = tf.greater(tf.abs(x), delta)

    term_1 = tf.reduce_sum(coef * tf.square(tf.boolean_mask(x, l2_mask)))
    term_2 = tf.reduce_sum(delta * (tf.abs(tf.boolean_mask(x, l1_mask)) - coef * delta))

    return term_1 + term_2

def _box_params_loss(self, ground_truth, ground_truth_num,
                     anchor_centers, offsets, proposals_num):
    # ground_truth shape is M x 4, where M is count and 4 are y,x,h,w
    ground_truth = tf.expand_dims(ground_truth, axis=0)
    ground_truth = tf.tile(ground_truth, [proposals_num, 1, 1])
    # anchor_centers shape is N x 4 where N is count and 4 are ya,xa,ha,wa
    anchor_centers = tf.expand_dims(anchor_centers, axis=1)
    anchor_centers = tf.tile(anchor_centers, [1, ground_truth_num, 1])
    # pos_sample_mask shape is N x M, True are for positive proposals and, hence,
    # for anchor centers
    pos_sample_mask = tf.greater(self.iou_metric, 0.7)
    # convert mask shape from N to N x 1 to make it broadcastable with pos_sample_mask
    mask = tf.expand_dims(self.cross_boundary_mask, axis=1)
    # convert resulting shape to align it with offsets
    mask = tf.expand_dims(tf.cast(pos_sample_mask & mask, tf.float32), axis=2)

    y_anchor, x_anchor, height_anchor, width_anchor = tf.unstack(anchor_centers, axis=2)
    y_ground_truth, x_ground_truth, height_ground_truth, width_ground_truth = tf.unstack(
        ground_truth, axis=2)

    # idea is to calculate N x M tx, ty, tw, th for ground truth boxes
    # for every proposal. Then we calculate loss, multiply it with mask
    # to filter out non-positive samples and sum to one
    # each shape is N x M
    tx_ground_truth = (x_ground_truth - x_anchor) / width_anchor
    ty_ground_truth = (y_ground_truth - y_anchor) / height_anchor
    tw_ground_truth = tf.log(width_ground_truth / width_anchor)
    th_ground_truth = tf.log(height_ground_truth / height_anchor)
    gt_params = tf.stack(
        [ty_ground_truth, tx_ground_truth, th_ground_truth, tw_ground_truth], axis=2)

    offsets = tf.expand_dims(offsets, axis=1)
    offsets = tf.tile(offsets, [1, ground_truth_num, 1])

    return huber_loss((offsets - gt_params) * mask)

def _safe_div(numerator, denominator, name):
    """Divides two values, returning 0 if the denominator is <= 0.

    Args:
      numerator: A real `Tensor`.
      denominator: A real `Tensor`, with dtype matching `numerator`.
      name: Name for the returned op.

    Returns:
      0 if `denominator` <= 0, else `numerator` / `denominator`
    """
    return tf.where(
        math_ops.greater(denominator, 0),
        math_ops.divide(numerator, denominator),
        tf.zeros_like(numerator),
        name=name)

def tf_ssd_bboxes_select_layer_all_classes(predictions_layer, localizations_layer,
                                           select_threshold=None):
    """Extract classes, scores and bounding boxes from features in one layer.
    Batch-compatible: inputs are supposed to have batch-type shapes.

    Args:
      predictions_layer: A SSD prediction layer;
      localizations_layer: A SSD localization layer;
      select_threshold: Classification threshold for selecting a box. If None,
        select boxes whose classification score is higher than 'no class'.
    Return:
      classes, scores, bboxes: Input Tensors.
    """
    # Reshape features: Batches x N x N_labels | 4
    p_shape = tfe.get_shape(predictions_layer)
    predictions_layer = tf.reshape(predictions_layer,
                                   tf.stack([p_shape[0], -1, p_shape[-1]]))
    l_shape = tfe.get_shape(localizations_layer)
    localizations_layer = tf.reshape(localizations_layer,
                                     tf.stack([l_shape[0], -1, l_shape[-1]]))
    # Boxes selection: use threshold or score > no-label criteria.
    if select_threshold is None or select_threshold == 0:
        # Class prediction and scores: assign 0. to 0-class
        classes = tf.argmax(predictions_layer, axis=2)
        scores = tf.reduce_max(predictions_layer, axis=2)
        scores = scores * tf.cast(classes > 0, scores.dtype)
    else:
        sub_predictions = predictions_layer[:, :, 1:]
        classes = tf.argmax(sub_predictions, axis=2) + 1
        scores = tf.reduce_max(sub_predictions, axis=2)
        # Only keep predictions higher than threshold.
        mask = tf.greater(scores, select_threshold)
        classes = classes * tf.cast(mask, classes.dtype)
        scores = scores * tf.cast(mask, scores.dtype)
    # Assume localization layer already decoded.
    bboxes = localizations_layer
    return classes, scores, bboxes

def _fit(tensor, width):
    actual = tf.shape(tensor)[-1]
    result = tf.cond(tf.greater(actual, width),
                     lambda: _trim(tensor, width),
                     lambda: _pad(tensor, width))
    return result

def gt(self, x, y):
    return tf.greater(x, y)