The following 50 code examples, extracted from open source Python projects, illustrate how to use tensorflow.less().
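Before the project examples, here is a minimal sketch of the basic semantics, assuming the TensorFlow 1.x graph API that the examples below use:

import tensorflow as tf

# tf.less(x, y) returns an element-wise boolean tensor, True where x < y.
x = tf.constant([1, 3, 5])
y = tf.constant([2, 3, 4])
mask = tf.less(x, y)

with tf.Session() as sess:
    print(sess.run(mask))  # [ True False False]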
def _leapfrog(self, q, p, step_size, get_gradient, mass):
    def loop_cond(i, q, p):
        return i < self.n_leapfrogs + 1

    def loop_body(i, q, p):
        step_size1 = tf.cond(i > 0,
                             lambda: step_size,
                             lambda: tf.constant(0.0, dtype=tf.float32))
        step_size2 = tf.cond(tf.logical_and(tf.less(i, self.n_leapfrogs),
                                            tf.less(0, i)),
                             lambda: step_size,
                             lambda: step_size / 2)
        q, p = leapfrog_integrator(q, p, step_size1, step_size2,
                                   lambda q: get_gradient(q), mass)
        return [i + 1, q, p]

    i = tf.constant(0)
    _, q, p = tf.while_loop(loop_cond, loop_body, [i, q, p],
                            back_prop=False, parallel_iterations=1)
    return q, p
def _smooth_l1_loss(self, bbox_pred, bbox_targets, bbox_inside_weights,
                    bbox_outside_weights, sigma=1.0, dim=[1]):
    sigma_2 = sigma ** 2
    box_diff = bbox_pred - bbox_targets
    in_box_diff = bbox_inside_weights * box_diff
    abs_in_box_diff = tf.abs(in_box_diff)
    smoothL1_sign = tf.stop_gradient(
        tf.to_float(tf.less(abs_in_box_diff, 1. / sigma_2)))
    in_loss_box = tf.pow(in_box_diff, 2) * (sigma_2 / 2.) * smoothL1_sign \
                  + (abs_in_box_diff - (0.5 / sigma_2)) * (1. - smoothL1_sign)
    out_loss_box = bbox_outside_weights * in_loss_box
    loss_box = tf.reduce_mean(tf.reduce_sum(out_loss_box, axis=dim))
    return loss_box
def simulate_dynamics(initial_pos, initial_vel, stepsize, n_steps, energy_fn):
    def leapfrog(pos, vel, step, i):
        de_dp_ = tf.gradients(tf.reduce_sum(energy_fn(pos)), pos)[0]
        new_vel_ = vel - step * de_dp_
        new_pos_ = pos + step * new_vel_
        return [new_pos_, new_vel_, step, tf.add(i, 1)]

    def condition(pos, vel, step, i):
        return tf.less(i, n_steps)

    de_dp = tf.gradients(tf.reduce_sum(energy_fn(initial_pos)), initial_pos)[0]
    vel_half_step = initial_vel - 0.5 * stepsize * de_dp
    pos_full_step = initial_pos + stepsize * vel_half_step

    i = tf.constant(0)
    final_pos, new_vel, _, _ = tf.while_loop(
        condition, leapfrog, [pos_full_step, vel_half_step, stepsize, i])
    de_dp = tf.gradients(tf.reduce_sum(energy_fn(final_pos)), final_pos)[0]
    final_vel = new_vel - 0.5 * stepsize * de_dp
    return final_pos, final_vel
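For context, simulate_dynamics is the leapfrog integrator used in Hamiltonian Monte Carlo. A minimal usage sketch, assuming a standard-Gaussian energy function (the quadratic energy_fn below is illustrative, not part of the original project):

import tensorflow as tf

# Hypothetical energy of a standard Gaussian: E(x) = 0.5 * sum(x^2) per chain.
energy_fn = lambda pos: 0.5 * tf.reduce_sum(tf.square(pos), axis=1)

pos0 = tf.random_normal([16, 2])  # 16 chains in 2 dimensions
vel0 = tf.random_normal([16, 2])
final_pos, final_vel = simulate_dynamics(pos0, vel0,
                                         stepsize=0.1, n_steps=10,
                                         energy_fn=energy_fn)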
def generate_mask(img_mask_list, h, w, l):
    img_masks, loss_masks = [], []

    for i in range(l):
        # generate image mask
        img_mask = img_mask_list[i]
        img_mask = tf.cast(tf.image.decode_png(img_mask), tf.float32)
        img_mask = tf.reshape(img_mask, (h, w))
        img_masks.append(img_mask)

        # generate loss mask
        s_total = h * w
        s_mask = tf.reduce_sum(img_mask)

        def f1():
            return img_mask * ((s_total - s_mask) / s_mask - 1) + 1

        def f2():
            return tf.zeros_like(img_mask)

        def f3():
            return tf.ones_like(img_mask)

        loss_mask = tf.case([(tf.equal(s_mask, 0), f2),
                             (tf.less(s_mask, s_total / 2), f1)],
                            default=f3)
        loss_masks.append(loss_mask)

    return tf.stack(img_masks), tf.stack(loss_masks)
def _adapt_mass(self, t, num_chain_dims):
    ewmv = ExponentialWeightedMovingVariance(
        self.mass_decay, self.data_shapes, num_chain_dims)
    new_mass = tf.cond(self.adapt_mass,
                       lambda: ewmv.get_updated_precision(self.q),
                       lambda: ewmv.precision())
    if not isinstance(new_mass, list):
        new_mass = [new_mass]

    # print('New mass is = {}'.format(new_mass))
    # TODO incorrect shape?
    # print('q={}, NMS={}'.format(self.q[0].get_shape(),
    #                             new_mass[0].get_shape()))
    with tf.control_dependencies(new_mass):
        current_mass = tf.cond(
            tf.less(tf.to_int32(t), self.mass_collect_iters),
            lambda: [tf.ones(shape) for shape in self.data_shapes],
            lambda: new_mass)
    if not isinstance(current_mass, list):
        current_mass = [current_mass]
    return current_mass
def __init__(self, num_anchors, config, seed=None, name='anchor_target'):
    super(RPNTarget, self).__init__(name=name)
    self._num_anchors = num_anchors
    self._allowed_border = config.allowed_border
    # We set clobber positives to False to make sure that there is always
    # at least one positive anchor per GT box.
    self._clobber_positives = config.clobber_positives
    # We set anchors as positive when the IoU is greater than
    # `positive_overlap`.
    self._positive_overlap = config.foreground_threshold
    # We set anchors as negative when the IoU is less than
    # `negative_overlap`.
    self._negative_overlap = config.background_threshold_high
    # Fraction of the batch to be foreground labeled anchors.
    self._foreground_fraction = config.foreground_fraction
    self._minibatch_size = config.minibatch_size
    # When choosing random targets, use `seed` to replicate behaviour.
    self._seed = seed
def cal_loss(self):
    one_hot_labels = tf.one_hot(
        self.labels, depth=self.conf.class_num,
        axis=self.channel_axis, name='labels/one_hot')
    losses = tf.losses.softmax_cross_entropy(
        one_hot_labels, self.predictions, scope='loss/losses')
    self.loss_op = tf.reduce_mean(losses, name='loss/loss_op')
    self.decoded_preds = tf.argmax(
        self.predictions, self.channel_axis, name='accuracy/decode_pred')
    correct_prediction = tf.equal(
        self.labels, self.decoded_preds, name='accuracy/correct_pred')
    self.accuracy_op = tf.reduce_mean(
        tf.cast(correct_prediction, tf.float32, name='accuracy/cast'),
        name='accuracy/accuracy_op')
    # weights = tf.cast(
    #     tf.greater(self.decoded_preds, 0, name='m_iou/greater'),
    #     tf.int32, name='m_iou/weights')
    weights = tf.cast(
        tf.less(self.labels, self.conf.channel, name='m_iou/greater'),
        tf.int64, name='m_iou/weights')
    labels = tf.multiply(self.labels, weights, name='m_iou/mul')
    self.m_iou, self.miou_op = tf.metrics.mean_iou(
        self.labels, self.decoded_preds, self.conf.class_num,
        weights, name='m_iou/m_ious')
def _deepfool2(model, x, epochs, eta, clip_min, clip_max, min_prob):
    y0 = tf.stop_gradient(tf.reshape(model(x), [-1])[0])
    y0 = tf.to_int32(tf.greater(y0, 0.5))

    def _cond(i, z):
        xadv = tf.clip_by_value(x + z * (1 + eta), clip_min, clip_max)
        y = tf.stop_gradient(tf.reshape(model(xadv), [-1])[0])
        y = tf.to_int32(tf.greater(y, 0.5))
        return tf.logical_and(tf.less(i, epochs), tf.equal(y0, y))

    def _body(i, z):
        xadv = tf.clip_by_value(x + z * (1 + eta), clip_min, clip_max)
        y = tf.reshape(model(xadv), [-1])[0]
        g = tf.gradients(y, xadv)[0]
        dx = -y * g / tf.norm(g)
        return i + 1, z + dx

    _, noise = tf.while_loop(_cond, _body, [0, tf.zeros_like(x)],
                             name='_deepfool2_impl', back_prop=False)
    return noise
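A minimal usage sketch for _deepfool2, assuming a binary classifier that outputs a single probability (the toy logistic model below is illustrative, not part of the original project):

import tensorflow as tf

# Hypothetical binary model: logistic regression on 2-D inputs.
w = tf.constant([[1.0], [-1.0]])
model = lambda x: tf.sigmoid(tf.matmul(x, w))

x = tf.constant([[0.3, -0.2]])
noise = _deepfool2(model, x, epochs=10, eta=0.01,
                   clip_min=-1.0, clip_max=1.0, min_prob=0.0)
# Apply the perturbation the same way the loop does internally.
xadv = tf.clip_by_value(x + noise * (1 + 0.01), -1.0, 1.0)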
def stochastical_binarize_gradients(grads_and_vars, scalers):
    """Stochastically binarize gradients."""
    gradients, variables = zip(*grads_and_vars)
    binarized_gradients = []
    for gradient, scaler in zip(gradients, scalers):
        if gradient is None:
            binarized_gradients.append(None)
            continue
        if isinstance(gradient, tf.IndexedSlices):
            gradient_shape = gradient.dense_shape
        else:
            gradient_shape = gradient.get_shape()

        zeros = tf.zeros(gradient_shape)
        abs_gradient = tf.abs(gradient)
        sign_gradient = tf.sign(gradient)
        rnd_sample = tf.random_uniform(gradient_shape, 0, scaler)
        where_cond = tf.less(rnd_sample, abs_gradient)
        binarized_gradient = tf.cond(
            tf.size(gradient) < FLAGS.size_to_binarize,
            lambda: gradient,
            lambda: tf.where(where_cond, sign_gradient * scaler, zeros))

        binarized_gradients.append(binarized_gradient)
    return list(zip(binarized_gradients, variables))
def bboxes_filter_center(labels, bboxes, margins=[0., 0., 0., 0.],
                         scope=None):
    """Filter out bounding boxes whose center is not in the rectangle
    [0, 0, 1, 1] + margins. The margin Tensor can be used to enforce or
    loosen this condition.

    Return:
      labels, bboxes: Filtered elements.
    """
    with tf.name_scope(scope, 'bboxes_filter', [labels, bboxes]):
        cy = (bboxes[:, 0] + bboxes[:, 2]) / 2.
        cx = (bboxes[:, 1] + bboxes[:, 3]) / 2.
        mask = tf.greater(cy, margins[0])
        mask = tf.logical_and(mask, tf.greater(cx, margins[1]))
        # The third condition must test cy, not cx, or the vertical
        # bound is never checked.
        mask = tf.logical_and(mask, tf.less(cy, 1. + margins[2]))
        mask = tf.logical_and(mask, tf.less(cx, 1. + margins[3]))
        # Boolean masking...
        labels = tf.boolean_mask(labels, mask)
        bboxes = tf.boolean_mask(bboxes, mask)
        return labels, bboxes
def lr_schedule_op(self):
    lr_stage_0 = self.lr_start
    lr_stage_1 = tf.constant(0.0005)
    lr_stage_2 = tf.constant(0.0003)
    lr_stage_3 = tf.constant(0.0001)
    gate_0 = tf.constant(int(5e5), dtype=tf.int32)
    gate_1 = tf.constant(int(1e6), dtype=tf.int32)
    gate_2 = tf.constant(int(2e6), dtype=tf.int32)

    def f1(): return lr_stage_0
    def f2(): return lr_stage_1
    def f3(): return lr_stage_2
    def f4(): return lr_stage_3

    new_lr = tf.case([(tf.less(self.global_step, gate_0), f1),
                      (tf.less(self.global_step, gate_1), f2),
                      (tf.less(self.global_step, gate_2), f3)],
                     default=f4, exclusive=False)
    return self.learning_rate.assign(new_lr)
def broadcast_against(tensor, against_expr):
    """Adds trailing dimensions to mask to enable broadcasting against data.

    :param tensor: tensor to be broadcasted
    :param against_expr: tensor will be broadcasted against it
    :return: mask expr with tf.rank(mask) == tf.rank(data)
    """
    def cond(data, tensor):
        return tf.less(tf.rank(tensor), tf.rank(data))

    def body(data, tensor):
        return data, tf.expand_dims(tensor, -1)

    shape_invariants = [against_expr.get_shape(), tf.TensorShape(None)]
    _, tensor = tf.while_loop(cond, body,
                              [against_expr, tensor], shape_invariants)
    return tensor
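A minimal usage sketch (the shapes are illustrative): a rank-1 per-batch mask is padded with trailing singleton dimensions until it can broadcast against rank-4 data:

import tensorflow as tf

data = tf.zeros([4, 8, 8, 3])          # rank 4
mask = tf.constant([1., 0., 1., 0.])   # rank 1, one flag per batch element

mask4d = broadcast_against(mask, data)  # shape becomes [4, 1, 1, 1]
masked = data * mask4d                  # broadcasting now works

with tf.Session() as sess:
    print(sess.run(tf.shape(mask4d)))   # [4 1 1 1]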
def get_hash_slots(self, query):
    """Gets hashed-to buckets for batch of queries.

    Args:
      query: 2-d Tensor of query vectors.

    Returns:
      A list of hashed-to buckets for each hash function.
    """
    binary_hash = [
        tf.less(tf.matmul(query, self.hash_vecs[i], transpose_b=True), 0)
        for i in xrange(self.num_libraries)]
    hash_slot_idxs = [
        tf.reduce_sum(
            tf.to_int32(binary_hash[i]) *
            tf.constant([[2 ** i for i in xrange(self.num_hashes)]],
                        dtype=tf.int32), 1)
        for i in xrange(self.num_libraries)]
    return hash_slot_idxs
def log_quaternion_loss_batch(predictions, labels,
                              name='log_quaternion_batch_loss'):
    """A helper function to compute the error between quaternions.

    Args:
      predictions: A Tensor of size [batch_size, 4].
      labels: A Tensor of size [batch_size, 4].
      name: A name for the operation.

    Returns:
      A Tensor of size [batch_size], denoting the error between the
      quaternions.
    """
    assertions = []
    assertions.append(
        tf.Assert(
            tf.reduce_all(
                tf.less(tf.abs(tf.reduce_sum(tf.square(predictions), [1]) - 1),
                        1e-4)),
            ['The l2 norm of each prediction quaternion vector should be 1.']))
    assertions.append(
        tf.Assert(
            tf.reduce_all(
                tf.less(tf.abs(tf.reduce_sum(tf.square(labels), [1]) - 1),
                        1e-4)),
            ['The l2 norm of each label quaternion vector should be 1.']))
    with tf.name_scope(name):
        with tf.control_dependencies(assertions):
            product = tf.multiply(predictions, labels)
        internal_dot_products = tf.reduce_sum(product, [1])
        logcost = tf.log(1e-4 + 1 - tf.abs(internal_dot_products))
    return logcost
def _build_ops(self):
    i0 = tf.constant(0, dtype=tf.int32)
    loop_condition = lambda i, inputs, state: tf.less(i, self.max_steps)

    def body(i, inputs, full_state):
        idx = i % self.num_cores
        prev_state = full_state[idx]
        inputs, full_state[idx] = self.shared_cell(inputs, prev_state)
        return i + 1, inputs, full_state

    _, inputs, full_state = tf.while_loop(
        loop_condition, body,
        loop_vars=[i0, self.inputs, self.initial_state])
def preprocess(image, size, max_length):
    # max_length: whether `size` dictates the longest or shortest side.
    # Defaults to longest.
    shape = tf.shape(image)
    size_t = tf.constant(size, tf.float64)
    height = tf.cast(shape[0], tf.float64)
    width = tf.cast(shape[1], tf.float64)

    cond_op = tf.less(width, height) if max_length else tf.less(height, width)

    new_height, new_width = tf.cond(
        cond_op,
        lambda: (size_t, (width * size_t) / height),
        lambda: ((height * size_t) / width, size_t))
    new_size = [tf.to_int32(new_height), tf.to_int32(new_width)]
    resized_image = tf.image.resize_images(image, new_size)
    # `mean_pixel` is a module-level constant in the original project.
    normalised_image = resized_image - mean_pixel
    return normalised_image
def get_mu_tensor(self):
    const_fact = self._dist_to_opt_avg**2 * self._h_min**2 / 2 / self._grad_var
    coef = tf.Variable([-1.0, 3.0, 0.0, 1.0], dtype=tf.float32,
                       name="cubic_solver_coef")
    coef = tf.scatter_update(coef, tf.constant(2), -(3 + const_fact))
    roots = tf.py_func(np.roots, [coef], Tout=tf.complex64, stateful=False)

    # Filter out the correct root.
    root_idx = tf.logical_and(
        tf.logical_and(tf.greater(tf.real(roots), tf.constant(0.0)),
                       tf.less(tf.real(roots), tf.constant(1.0))),
        tf.less(tf.abs(tf.imag(roots)), 1e-5))
    # In case there are two duplicated roots satisfying the above condition.
    root = tf.reshape(tf.gather(tf.gather(roots, tf.where(root_idx)),
                                tf.constant(0)), shape=[])
    tf.assert_equal(tf.size(root), tf.constant(1))

    dr = self._h_max / self._h_min
    mu = tf.maximum(tf.real(root)**2,
                    ((tf.sqrt(dr) - 1) / (tf.sqrt(dr) + 1))**2)
    return mu
def flip_with_bboxes(image, bboxes):
    uniform_random = tf.random_uniform([], 0, 1.0)
    mirror_cond = tf.less(uniform_random, .5)
    stride = tf.where(mirror_cond, -1, 1)

    def flip(image, bboxes, stride):
        image = image[:, ::stride, :]
        img_w = tf.cast(tf.shape(image)[1], dtype=tf.float32)
        bbox_coords = tf.unstack(bboxes, num=4, axis=1)
        y_min = bbox_coords[0]
        x_min = bbox_coords[1]
        y_max = bbox_coords[2]
        x_max = bbox_coords[3]
        x_min_flip = img_w - x_max
        x_max_flip = img_w - x_min
        bboxes = tf.stack([y_min, x_min_flip, y_max, x_max_flip], 1,
                          name='flip_bboxes')
        return image, bboxes

    def not_flip(image, bboxes):
        return image, bboxes

    image_fliped, bboxes = tf.cond(mirror_cond,
                                   lambda: flip(image, bboxes, stride),
                                   lambda: not_flip(image, bboxes))
    return tf_image.fix_image_flip_shape(image, image_fliped), bboxes
def piecewise_function(param, values, changepoints, name=None,
                       dtype=tf.float32):
    """Compute a piecewise function.

    Arguments:
      param: The function parameter.
      values: List of function values (numbers or tensors).
      changepoints: Sorted list of points where the function changes from
        one value to the next. Must be one item shorter than `values`.
    """
    if len(changepoints) != len(values) - 1:
        raise ValueError("changepoints has length {}, expected {} (values "
                         "has length {})".format(len(changepoints),
                                                 len(values) - 1,
                                                 len(values)))

    with tf.name_scope(name, "PiecewiseFunction",
                       [param, values, changepoints]) as s_name:
        values = [tf.convert_to_tensor(y, dtype=dtype) for y in values]
        # This is a trick to make each lambda return a different y:
        lambdas = [lambda y=y: y for y in values]
        predicates = [tf.less(param, x) for x in changepoints]
        return tf.case(list(zip(predicates, lambdas[:-1])), lambdas[-1],
                       name=s_name)
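A minimal usage sketch, assuming a scalar step counter: a three-stage learning-rate schedule with changepoints at steps 1000 and 5000 (the values are illustrative):

import tensorflow as tf

global_step = tf.train.get_or_create_global_step()
learning_rate = piecewise_function(global_step,
                                   values=[0.1, 0.01, 0.001],
                                   changepoints=[1000, 5000])
# learning_rate is 0.1 while global_step < 1000, then 0.01 until step
# 5000, then 0.001. TensorFlow also ships a built-in equivalent,
# tf.train.piecewise_constant.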
def smoothL1(x, sigma):
    '''
    Tensorflow implementation of smooth L1 loss defined in Fast RCNN:
    (https://arxiv.org/pdf/1504.08083v2.pdf)

                    0.5 * (sigma * x)^2    if |x| < 1/sigma^2
    smoothL1(x) = {
                    |x| - 0.5/sigma^2      otherwise
    '''
    with tf.variable_scope('smoothL1'):
        conditional = tf.less(tf.abs(x), 1 / sigma**2)
        close = 0.5 * (sigma * x)**2
        far = tf.abs(x) - 0.5 / sigma**2
        return tf.where(conditional, close, far)
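A quick numeric check of the two branches, as a sketch (with sigma = 1 the crossover is at |x| = 1):

import tensorflow as tf

x = tf.constant([-2.0, -0.5, 0.0, 0.5, 2.0])
loss = smoothL1(x, sigma=1.0)

with tf.Session() as sess:
    print(sess.run(loss))  # [1.5, 0.125, 0.0, 0.125, 1.5]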
def atan2(x, y, epsilon=1.0e-12):
    """
    A hack until the TensorFlow developers implement a function that can
    find the angle from an x and y coordinate.
    :param x:
    :param y:
    :param epsilon:
    :return:
    """
    # Add a small number to all zeros, to avoid division by zero:
    x = tf.where(tf.equal(x, 0.0), x + epsilon, x)
    y = tf.where(tf.equal(y, 0.0), y + epsilon, y)

    angle = tf.where(tf.greater(x, 0.0), tf.atan(y / x), tf.zeros_like(x))
    angle = tf.where(tf.logical_and(tf.less(x, 0.0), tf.greater_equal(y, 0.0)),
                     tf.atan(y / x) + np.pi, angle)
    angle = tf.where(tf.logical_and(tf.less(x, 0.0), tf.less(y, 0.0)),
                     tf.atan(y / x) - np.pi, angle)
    angle = tf.where(tf.logical_and(tf.equal(x, 0.0), tf.greater(y, 0.0)),
                     0.5 * np.pi * tf.ones_like(x), angle)
    angle = tf.where(tf.logical_and(tf.equal(x, 0.0), tf.less(y, 0.0)),
                     -0.5 * np.pi * tf.ones_like(x), angle)
    angle = tf.where(tf.logical_and(tf.equal(x, 0.0), tf.equal(y, 0.0)),
                     tf.zeros_like(x), angle)
    return angle
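For reference, newer TensorFlow releases provide this natively, so the hack above is only needed on old versions. A sketch of the built-in call (note the argument order matches math.atan2):

import tensorflow as tf

y = tf.constant([1.0, -1.0])
x = tf.constant([1.0, -1.0])
angle = tf.atan2(y, x)  # tf.atan2 takes (y, x), like math.atan2

with tf.Session() as sess:
    print(sess.run(angle))  # approx [ 0.7853982 -2.3561945]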
def preprocess(image, size):
    shape = tf.shape(image)
    size_t = tf.constant(size, tf.float64)
    height = tf.cast(shape[0], tf.float64)
    width = tf.cast(shape[1], tf.float64)

    # Scale so that the shorter side equals `size`, then center-crop.
    cond_op = tf.less(height, width)

    new_height, new_width = tf.cond(
        cond_op,
        lambda: (size_t, (width * size_t) / height),
        lambda: ((height * size_t) / width, size_t))

    resized_image = tf.image.resize_images(
        image,
        [tf.to_int32(new_height), tf.to_int32(new_width)],
        method=tf.image.ResizeMethod.BICUBIC)
    cropped = tf.image.resize_image_with_crop_or_pad(resized_image,
                                                     size, size)
    return cropped
def compute_states(self, emb):
    def unpack_sequence(tensor):
        return tf.unpack(tf.transpose(tensor, perm=[1, 0, 2]))

    with tf.variable_scope(
            "Composition",
            initializer=tf.contrib.layers.xavier_initializer(),
            regularizer=tf.contrib.layers.l2_regularizer(self.reg)):
        cell = rnn_cell.LSTMCell(self.hidden_dim)
        # tf.cond(tf.less(self.dropout
        # if tf.less(self.dropout, tf.constant(1.0)):
        cell = rnn_cell.DropoutWrapper(cell,
                                       output_keep_prob=self.dropout,
                                       input_keep_prob=self.dropout)
        # output, state = rnn.dynamic_rnn(cell, emb,
        #     sequence_length=self.lngths, dtype=tf.float32)
        outputs, _ = rnn.rnn(cell, unpack_sequence(emb),
                             sequence_length=self.lngths, dtype=tf.float32)
        # output = pack_sequence(outputs)

    sum_out = tf.reduce_sum(tf.pack(outputs), [0])
    sent_rep = tf.div(sum_out, tf.expand_dims(tf.to_float(self.lngths), 1))
    final_state = sent_rep
    return final_state
def get_candidates_representations_in_sentence(
        self, sentence_candidate_answers, sentence_attentioned_hidden_states):
    candidate_answer_num = tf.gather(tf.shape(sentence_candidate_answers), 0)
    logging.warn('candidate_answer_num:{}'.format(candidate_answer_num))
    logging.warn('sentence_candidate_answers:{}'.format(
        sentence_candidate_answers))
    # A node idx list.
    candidate_answer_nodeids = tf.gather(sentence_candidate_answers, 0)
    candidate_answer_hidden_list = tf.gather(
        sentence_attentioned_hidden_states, candidate_answer_nodeids)
    candidate_final_representations = \
        self.get_candidate_answer_final_representations(
            candidate_answer_hidden_list)
    candidates_final_representations = tf.expand_dims(
        candidate_final_representations, 0)
    idx_cand = tf.constant(1)

    def _recurse_candidate_answer(candidate_final_representations, idx_cand):
        cur_candidate_answer_nodeids = tf.gather(
            sentence_candidate_answers, idx_cand)
        cur_candidate_answer_hidden_list = tf.gather(
            sentence_attentioned_hidden_states, cur_candidate_answer_nodeids)
        cur_candidate_final_representations = tf.expand_dims(
            self.get_candidate_answer_final_representations(
                cur_candidate_answer_hidden_list), 0)
        candidate_final_representations = tf.concat(
            [candidate_final_representations,
             cur_candidate_final_representations], axis=0)
        idx_cand = tf.add(idx_cand, 1)
        return candidate_final_representations, idx_cand

    loop_cond = lambda a1, idx: tf.less(idx, candidate_answer_num)
    loop_vars = [candidates_final_representations, idx_cand]
    candidates_final_representations, idx_cand = tf.while_loop(
        loop_cond, _recurse_candidate_answer, loop_vars,
        shape_invariants=[tf.TensorShape([None, 2 * self.config.hidden_dim]),
                          idx_cand.get_shape()])
    return candidates_final_representations
def _compute_huber(predictions, labels, delta=1.0):
    """Returns a non-reduced tensor of unweighted losses with a batch
    dimension matching the inputs."""
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    predictions = tf.to_float(predictions)
    labels = tf.to_float(labels)
    delta = tf.to_float(delta)

    diff = predictions - labels
    diff_abs = tf.abs(diff)
    delta_fact = 0.5 * tf.square(delta)
    condition = tf.less(diff_abs, delta)
    left_opt = 0.5 * tf.square(diff)
    right_opt = delta * diff_abs - delta_fact
    # tf.select is the pre-1.0 API; in TensorFlow 1.x this op is tf.where.
    losses_val = tf.select(condition, left_opt, right_opt)
    return losses_val
def run_unary_modules_sample(modules, cur, hparams, k):
    """Run modules, sampling k."""
    selection_weights = create_selection_weights(
        "selection", ("softmax_topk", k),
        shape=[len(modules)],
        inv_t=100.0 * common_layers.inverse_exp_decay(
            hparams.anneal_until, min_value=0.01))
    all_res = [
        tf.cond(
            tf.less(selection_weights.normalized[n], 1e-6),
            lambda: tf.zeros_like(cur),
            lambda i=n: modules[i](cur, hparams))
        for n in xrange(len(modules))
    ]
    all_res = tf.concat([tf.expand_dims(r, axis=0) for r in all_res], axis=0)
    res = all_res * tf.reshape(selection_weights.normalized, [-1, 1, 1, 1, 1])
    return tf.reduce_sum(res, axis=0)
def neural_gpu_body(inputs, hparams, name=None):
    """The core Neural GPU."""
    with tf.variable_scope(name, "neural_gpu"):

        def step(state, inp):  # pylint: disable=missing-docstring
            x = tf.nn.dropout(state, 1.0 - hparams.dropout)
            for layer in xrange(hparams.num_hidden_layers):
                x = common_layers.conv_gru(
                    x, (hparams.kernel_height, hparams.kernel_width),
                    hparams.hidden_size, name="cgru_%d" % layer)
            # Padding input is zeroed-out in the modality, we check this
            # by summing.
            padding_inp = tf.less(tf.reduce_sum(tf.abs(inp), axis=[1, 2]),
                                  0.00001)
            # No-op where inp is padding.
            new_state = tf.where(padding_inp, state, x)
            return new_state

        return tf.foldl(
            step,
            tf.transpose(inputs, [1, 0, 2, 3]),
            initializer=inputs,
            parallel_iterations=1,
            swap_memory=True)
def imagenet_preprocess_example(example, mode, resize_size=None):
    """Preprocessing used for Imagenet and similar problems."""
    if resize_size is None:
        resize_size = [299, 299]

    def preprocess(img):
        img = tf.image.resize_images(img, [360, 360])
        img = common_layers.image_augmentation(
            tf.to_float(img) / 255., crop_size=resize_size)
        return tf.to_int64(img * 255.)

    def resize(img):
        return tf.to_int64(tf.image.resize_images(img, resize_size))

    inputs = tf.cast(example["inputs"], tf.int64)
    if mode == tf.estimator.ModeKeys.TRAIN:
        example["inputs"] = tf.cond(  # Preprocess 90% of the time.
            tf.less(tf.random_uniform([]), 0.9),
            lambda img=inputs: preprocess(img),
            lambda img=inputs: resize(img))
    else:
        example["inputs"] = resize(inputs)
    return example
def normal_ccdf(x, mu, sigma2):
    """Normal CCDF"""
    # Check for degenerate distributions when sigma2 == 0:
    #   if x >= mu, n = 0
    #   if x < mu, n = 1
    # sigma2_le_0 = tf.less_equal(sigma2, 0.)
    # x_gte_mu = tf.greater_equal(x, mu)
    # x_lt_mu = tf.less(x, mu)

    # Never divide by zero; instead the logic below handles degenerate
    # distribution cases.
    # sigma2 = tf.cond(sigma2_le_0, lambda: tf.ones_like(sigma2),
    #                  lambda: sigma2)
    p = 1. - 0.5 * (1. + tf.erf((x - mu) / tf.sqrt(2. * sigma2)))
    # p = tf.cond(tf.logical_and(sigma2_le_0, x_gte_mu),
    #             lambda: tf.zeros_like(p), lambda: p)
    # p = tf.cond(tf.logical_and(sigma2_le_0, x_lt_mu),
    #             lambda: tf.ones_like(p), lambda: p)
    return p
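A quick sanity check of the closed form, as a sketch: the CCDF equals 0.5 at the mean and decreases as x grows:

import tensorflow as tf

x = tf.constant([0.0, 1.0, 2.0])
p = normal_ccdf(x, mu=0.0, sigma2=1.0)

with tf.Session() as sess:
    print(sess.run(p))  # approx [0.5, 0.1587, 0.0228]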
def _compute_loss(self, prediction_tensor, target_tensor, weights):
    """Compute loss function.

    Args:
      prediction_tensor: A float tensor of shape [batch_size, num_anchors,
        code_size] representing the (encoded) predicted locations of objects.
      target_tensor: A float tensor of shape [batch_size, num_anchors,
        code_size] representing the regression targets.
      weights: a float tensor of shape [batch_size, num_anchors]

    Returns:
      loss: a (scalar) tensor representing the value of the loss function
    """
    diff = prediction_tensor - target_tensor
    abs_diff = tf.abs(diff)
    abs_diff_lt_1 = tf.less(abs_diff, 1)
    anchorwise_smooth_l1norm = tf.reduce_sum(
        tf.where(abs_diff_lt_1, 0.5 * tf.square(abs_diff), abs_diff - 0.5),
        2) * weights
    if self._anchorwise_output:
        return anchorwise_smooth_l1norm
    return tf.reduce_sum(anchorwise_smooth_l1norm)
def image_mirroring(img, label, seed):
    """
    Randomly mirrors the images.

    Args:
      img: Training image to mirror.
      label: Segmentation mask to mirror.
      seed: Random seed.
    """
    distort_left_right_random = tf.random_uniform(
        [1], 0, 1.0, dtype=tf.float32, seed=seed)[0]
    mirror = tf.less(tf.stack([1.0, distort_left_right_random, 1.0]), 0.5)
    mirror = tf.boolean_mask([0, 1, 2], mirror)
    img = tf.reverse(img, mirror)
    label = tf.reverse(label, mirror)
    return img, label
def lesser(x, y):
    '''Element-wise truth value of (x < y).
    Returns a bool tensor.
    '''
    return tf.less(x, y)
def detectMinVal(input_mat, var, threshold=1e-6, name='', debug=False):
    eigen_min = tf.reduce_min(input_mat)
    eigen_max = tf.reduce_max(input_mat)
    eigen_ratio = eigen_max / eigen_min
    input_mat_clipped = clipoutNeg(input_mat, threshold)

    if debug:
        input_mat_clipped = tf.cond(
            tf.logical_or(tf.greater(eigen_ratio, 0.),
                          tf.less(eigen_ratio, -500)),
            lambda: input_mat_clipped,
            lambda: tf.Print(input_mat_clipped, [
                tf.convert_to_tensor('screwed ratio ' + name +
                                     ' eigen values!!!'),
                tf.convert_to_tensor(var.name),
                eigen_min, eigen_max, eigen_ratio
            ]))

    return input_mat_clipped
def _init_step_size(self, q, p, mass, get_gradient, get_log_posterior):
    factor = 1.5

    def loop_cond(step_size, last_acceptance_rate, cond):
        return cond

    def loop_body(step_size, last_acceptance_rate, cond):
        # Calculate acceptance rate.
        new_q, new_p = leapfrog_integrator(
            q, p, tf.constant(0.0), step_size / 2, get_gradient, mass)
        new_q, new_p = leapfrog_integrator(
            new_q, new_p, step_size, step_size / 2, get_gradient, mass)
        __, _, _, _, acceptance_rate = get_acceptance_rate(
            q, p, new_q, new_p, get_log_posterior, mass, self.data_axes)
        acceptance_rate = tf.reduce_mean(acceptance_rate)

        # Change step size and stopping criteria.
        new_step_size = tf.cond(
            tf.less(acceptance_rate, self.target_acceptance_rate),
            lambda: step_size * (1.0 / factor),
            lambda: step_size * factor)
        cond = tf.logical_not(tf.logical_xor(
            tf.less(last_acceptance_rate, self.target_acceptance_rate),
            tf.less(acceptance_rate, self.target_acceptance_rate)))
        return [new_step_size, acceptance_rate, cond]

    new_step_size, _, _ = tf.while_loop(
        loop_cond, loop_body,
        [self.step_size, tf.constant(1.0), tf.constant(True)])
    return new_step_size
def __lt__(self, other):
    return tf.less(self, other)
def _sample(self, n_samples):
    p = tf.sigmoid(self.logits)
    shape = tf.concat([[n_samples], self.batch_shape], 0)
    alpha = tf.random_uniform(
        shape, minval=0, maxval=1, dtype=self.param_dtype)
    samples = tf.cast(tf.less(alpha, p), dtype=self.dtype)
    static_n_samples = n_samples if isinstance(n_samples, int) else None
    samples.set_shape(
        tf.TensorShape([static_n_samples]).concatenate(
            self.get_batch_shape()))
    return samples
def smooth_l1_loss(bbox_prediction, bbox_target, sigma=1.0):
    """
    Return Smooth L1 Loss for bounding box prediction.

    Args:
        bbox_prediction: shape (1, H, W, num_anchors * 4)
        bbox_target:     shape (1, H, W, num_anchors * 4)

    Smooth L1 loss is defined as:
        0.5 * x^2     if |x| < d
        abs(x) - 0.5  if |x| >= d
    Where d = 1 / sigma^2 (so d = 1 with the default sigma) and
    x = prediction - target.
    """
    sigma2 = sigma ** 2
    diff = bbox_prediction - bbox_target
    abs_diff = tf.abs(diff)
    abs_diff_lt_sigma2 = tf.less(abs_diff, 1.0 / sigma2)
    bbox_loss = tf.reduce_sum(
        tf.where(abs_diff_lt_sigma2,
                 0.5 * tf.square(abs_diff),
                 abs_diff - 0.5),
        [1])
    return bbox_loss
def huber_loss(labels, predictions, delta=1.0):
    '''
    Huber loss: L2 before delta, L1 after delta.
    '''
    residual = tf.abs(predictions - labels)
    condition = tf.less(residual, delta)
    small_res = 0.5 * tf.square(residual)
    large_res = delta * residual - 0.5 * tf.square(delta)
    return tf.where(condition, small_res, large_res)
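A sketch of how this element-wise loss is typically reduced in a training graph (recent TF 1.x versions also ship tf.losses.huber_loss, which additionally applies reduction and weighting):

import tensorflow as tf

labels = tf.constant([0.0, 1.0, 4.0])
predictions = tf.constant([0.5, 1.0, 0.0])

per_element = huber_loss(labels, predictions, delta=1.0)  # [0.125, 0.0, 3.5]
loss = tf.reduce_mean(per_element)

with tf.Session() as sess:
    print(sess.run(loss))  # approx 1.2083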
def cout_zeros():
    zeros_num = 0.
    all_num = 0.
    for v in tf.trainable_variables():
        # Count weights whose magnitude is below the 1e-4 threshold,
        # i.e. treated as zero.
        zeros_num += tf.reduce_sum(
            tf.to_float(tf.less(tf.abs(v), tf.ones_like(v) * 0.0001)))
        all_num += tf.reduce_sum(tf.ones_like(v))
    return [zeros_num, all_num]
def exceedingAngleThreshold(pred, gt, ss, threshold, outputChannels=2):
    with tf.name_scope("angular_error"):
        pred = tf.reshape(pred, (-1, outputChannels))
        gt = tf.to_float(tf.reshape(gt, (-1, outputChannels)))
        ss = tf.to_float(tf.reshape(ss, (-1, 1)))

        pred = tf.nn.l2_normalize(pred, 1) * 0.999999
        gt = tf.nn.l2_normalize(gt, 1) * 0.999999

        errorAngles = tf.acos(
            tf.reduce_sum(pred * gt, reduction_indices=[1],
                          keep_dims=True)) * ss
        exceedCount = tf.reduce_sum(
            tf.to_float(tf.less(threshold / 180 * 3.14159, errorAngles)))
        return exceedCount
def testCreatePhasesWithLoop(self):
    # Test a preprocessing function with control flow.
    #
    # The loop represents
    #
    #   i = 0
    #   while i < 10:
    #     i += 1
    #     x -= 1
    #
    # To get an error in the case where apply_function is not called, we have
    # to call an analyzer first (see testCreatePhasesWithUnwrappedLoop). So
    # we also do so here.
    def preprocessing_fn(inputs):
        def _subtract_ten(x):
            i = tf.constant(0)
            c = lambda i, x: tf.less(i, 10)
            b = lambda i, x: (tf.add(i, 1), tf.add(x, -1))
            return tf.while_loop(c, b, [i, x])[1]

        scaled_to_0_1 = mappers.scale_to_0_1(
            api.apply_function(_subtract_ten, inputs['x']))
        return {'x_scaled': scaled_to_0_1}

    input_schema = sch.Schema({
        'x': sch.ColumnSchema(tf.int32, [], sch.FixedColumnRepresentation())
    })
    graph, _, _ = impl_helper.run_preprocessing_fn(
        preprocessing_fn, input_schema)
    phases = impl_helper.create_phases(graph)
    self.assertEqual(len(phases), 1)
    self.assertEqual(len(phases[0].analyzers), 2)
def testCreatePhasesWithUnwrappedLoop(self):
    # Test a preprocessing function with control flow.
    #
    # The loop represents
    #
    #   i = 0
    #   while i < 10:
    #     i += 1
    #     x -= 1
    #
    # We need to call an analyzer after the loop because only the transitive
    # parents of analyzers are inspected by create_phases.
    def preprocessing_fn(inputs):
        def _subtract_ten(x):
            i = tf.constant(0)
            c = lambda i, x: tf.less(i, 10)
            b = lambda i, x: (tf.add(i, 1), tf.add(x, -1))
            return tf.while_loop(c, b, [i, x])[1]

        scaled_to_0_1 = mappers.scale_to_0_1(_subtract_ten(inputs['x']))
        return {'x_scaled': scaled_to_0_1}

    input_schema = sch.Schema({
        'x': sch.ColumnSchema(tf.int32, [], sch.FixedColumnRepresentation())
    })
    graph, _, _ = impl_helper.run_preprocessing_fn(
        preprocessing_fn, input_schema)
    with self.assertRaisesRegexp(ValueError, 'Cycle detected'):
        _ = impl_helper.create_phases(graph)
def sequential_for(fn, begin, end):
    def _cond(i):
        return tf.less(i, end)

    def _body(i):
        ops = fn(i)
        with tf.control_dependencies(ops):
            return i + 1

    return tf.while_loop(_cond, _body, [begin])
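A minimal usage sketch: running a print op once per step, in order (the make_step body is illustrative):

import tensorflow as tf

def make_step(i):
    # One op per iteration; the control_dependencies in sequential_for
    # forces each iteration's ops to run before the counter advances.
    return [tf.Print(i, [i], message='step ')]

loop = sequential_for(make_step, tf.constant(0), tf.constant(5))

with tf.Session() as sess:
    sess.run(loop)  # logs step 0..4 to stderr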
def smooth_l1(x):
    l2 = 0.5 * (x**2.0)
    l1 = tf.abs(x) - 0.5
    condition = tf.less(tf.abs(x), 1.0)
    re = tf.where(condition, l2, l1)
    return re
def lt(self, x, y):
    return tf.less(x, y)