We extracted the following 50 code examples from open source Python projects to show how tensorflow.floor() is used.
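Before the project examples, here is a minimal standalone sketch (written in the TF 1.x style used by the snippets below) of what tf.floor does: it rounds every element of a tensor down toward negative infinity.

import tensorflow as tf

# tf.floor rounds each element down to the nearest integer.
x = tf.constant([-1.7, -0.2, 0.2, 1.7])
y = tf.floor(x)

with tf.Session() as sess:
    print(sess.run(y))  # [-2. -1.  0.  1.]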
def dice(y_true, y_pred):
    """
    Computes the Sorensen-Dice metric
                    TP
        Dice = 2 -------
                  T + P
    Parameters
    ----------
    y_true : keras.placeholder
        Placeholder that contains the ground truth labels of the classes
    y_pred : keras.placeholder
        Placeholder that contains the class prediction
    Returns
    -------
    scalar
        Dice metric
    """
    y_pred_decision = tf.floor((y_pred + K.epsilon()) / K.max(y_pred, axis=4, keepdims=True))

    y_sum = K.sum(y_true * y_pred_decision)

    return (2. * y_sum + K.epsilon()) / (K.sum(y_true) + K.sum(y_pred_decision) + K.epsilon())
def cyclic_learning_rate(
        learning_rate_min,
        learning_rate_max,
        step_size,
        global_step,
        mode='triangular',
        scope=None):
    with tf.variable_scope(scope, 'CyclicLearningRate'):
        cycle = tf.floor(1 + tf.to_float(global_step) / (2 * step_size))

        if mode == 'triangular':
            scale = 1
        elif mode == 'triangular2':
            scale = 2**(cycle - 1)
        else:
            raise ValueError('Unrecognized mode: {}'.format(mode))

        x = tf.abs(tf.to_float(global_step) / step_size - 2 * cycle + 1)
        lr = learning_rate_min + (learning_rate_max - learning_rate_min) * \
            tf.maximum(0.0, 1 - x) / scale

        return lr
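As a hypothetical usage sketch (the hyperparameter values, the loss tensor, and the optimizer choice are illustrative assumptions, not part of the original project), the helper above would typically be wired into a training step like this:

# Illustrative wiring only; `loss` is assumed to be defined elsewhere.
global_step = tf.train.get_or_create_global_step()
lr = cyclic_learning_rate(learning_rate_min=1e-4,
                          learning_rate_max=1e-2,
                          step_size=2000,
                          global_step=global_step,
                          mode='triangular')
train_op = tf.train.MomentumOptimizer(lr, momentum=0.9).minimize(
    loss, global_step=global_step)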
def fixed_dropout(xs, keep_prob, noise_shape, seed=None):
    """
    Apply dropout with same mask over all inputs
    Args:
        xs: list of tensors
        keep_prob:
        noise_shape:
        seed:

    Returns:
        list of dropped inputs
    """
    with tf.name_scope("dropout", values=xs):
        noise_shape = noise_shape
        # uniform [keep_prob, 1.0 + keep_prob)
        random_tensor = keep_prob
        random_tensor += tf.random_uniform(noise_shape, seed=seed, dtype=xs[0].dtype)
        # 0. if [keep_prob, 1.0) and 1. if [1.0, 1.0 + keep_prob)
        binary_tensor = tf.floor(random_tensor)
        outputs = []
        for x in xs:
            ret = tf.div(x, keep_prob) * binary_tensor
            ret.set_shape(x.get_shape())
            outputs.append(ret)
        return outputs
def mu_law(x, mu=255, int8=False):
    """A TF implementation of Mu-Law encoding.

    Args:
        x: The audio samples to encode.
        mu: The Mu to use in our Mu-Law.
        int8: Use int8 encoding.

    Returns:
        out: The Mu-Law encoded int8 data.
    """
    out = tf.sign(x) * tf.log(1 + mu * tf.abs(x)) / np.log(1 + mu)
    out = tf.floor(out * 128)
    if int8:
        out = tf.cast(out, tf.int8)
    return out
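The encoder above has a natural inverse. The following decoder is a rough sketch of my own (not taken from the project), assuming the int8-style output produced by mu_law above:

def inv_mu_law(out, mu=255):
    """Rough inverse of the mu_law encoder above (a sketch, not the project's decoder)."""
    out = tf.cast(out, tf.float32) / 128.0  # back to roughly [-1, 1)
    return tf.sign(out) / mu * (tf.pow(1.0 + mu, tf.abs(out)) - 1.0)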
def blend_layers(control, shape, feather=1.0, *layers):
    layer_count = len(layers)

    control = normalize(control)
    control *= layer_count

    control_floor = tf.cast(control, tf.int32)

    x_index = row_index(shape)
    y_index = column_index(shape)

    layers = tf.stack(list(layers) + [layers[-1]])
    layer_count += 1

    floor_values = control_floor[:, :, 0]

    # I'm not sure why the mod operation is needed, but tensorflow-cpu explodes without it.
    combined_layer_0 = tf.gather_nd(layers, tf.stack([floor_values % layer_count, y_index, x_index], 2))
    combined_layer_1 = tf.gather_nd(layers, tf.stack([(floor_values + 1) % layer_count, y_index, x_index], 2))

    control_floor_fract = control - tf.floor(control)
    control_floor_fract = tf.minimum(tf.maximum(control_floor_fract - (1.0 - feather), 0.0) / feather, 1.0)

    return blend(combined_layer_0, combined_layer_1, control_floor_fract)
def posterize(tensor, levels):
    """
    Reduce the number of color levels per channel.

    :param Tensor tensor:
    :param int levels:
    :return: Tensor
    """
    tensor *= levels
    tensor = tf.floor(tensor)
    tensor /= levels

    return tensor
def _quantize(x, params, randomize=True):
    """Quantize x according to params, optionally randomizing the rounding."""
    if not params.quantize:
        return x

    if not randomize:
        return tf.bitcast(
            tf.cast(x / params.quantization_scale, tf.int16), tf.float16)

    abs_x = tf.abs(x)
    sign_x = tf.sign(x)
    y = abs_x / params.quantization_scale
    y = tf.floor(y + tf.random_uniform(tf.shape(x)))
    y = tf.minimum(y, tf.int16.max) * sign_x
    q = tf.bitcast(tf.cast(y, tf.int16), tf.float16)
    return q
def _quantize(x, params, randomize=True):
    """Quantize x according to params, optionally randomizing the rounding."""
    if not params.quantize:
        return x

    if not randomize:
        return tf.bitcast(
            tf.cast(x / params.quantization_scale, tf.int16), tf.float16)

    abs_x = tf.abs(x)
    sign_x = tf.sign(x)
    y = abs_x / params.quantization_scale
    y = tf.floor(y + tf.random_uniform(common_layers.shape_list(x)))
    y = tf.minimum(y, tf.int16.max) * sign_x
    q = tf.bitcast(tf.cast(y, tf.int16), tf.float16)
    return q
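For context, a matching dequantization step would undo the bitcast and rescale. The sketch below is an assumption inferred from the code above, not necessarily the project's own helper:

def _dequantize(q, params):
    """Sketch of an inverse for _quantize above (assumed, not verbatim from the project)."""
    if not params.quantize:
        return q
    return tf.cast(tf.bitcast(q, tf.int16), tf.float32) * params.quantization_scale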
def feature_sharing(features):
    """Feature sharing operation.

    Args:
        features: List of hidden features from models.
    """
    nmodel = len(features)
    with tf.variable_scope('feature_sharing'):
        shape = features[0].get_shape()
        output = [0.] * nmodel
        for from_idx in range(nmodel):
            for to_idx in range(nmodel):
                if from_idx == to_idx:
                    # don't drop hidden features within a model.
                    mask = 1.
                else:
                    # randomly drop features to share with another model.
                    mask = tf.floor(0.7 + tf.random_uniform(shape))
                output[to_idx] += mask * features[from_idx]
        return output
def dropout(x, pkeep, phase=None, mask=None):
    mask = tf.floor(pkeep + tf.random_uniform(tf.shape(x))) if mask is None else mask
    if phase is None:
        return mask * x
    else:
        return switch(phase, mask * x, pkeep * x)
def dice_1(y):
    """
    Computes the Sorensen-Dice metric, where P come from class 1,2,3,4,5
                    TP
        Dice = 2 -------
                  T + P
    Parameters
    ----------
    y_true : keras.placeholder
        Placeholder that contains the ground truth labels of the classes
    y_pred : keras.placeholder
        Placeholder that contains the class prediction
    Returns
    -------
    scalar
        Dice metric
    """
    y_true, y_pred = y

    y_pred_decision = tf.floor(y_pred / K.max(y_pred, axis=4, keepdims=True))

    mask_true = y_true[:, :, :, :, 1]
    mask_pred = y_pred_decision[:, :, :, :, 1]

    y_sum = K.sum(mask_true * mask_pred)

    return (2. * y_sum + K.epsilon()) / (K.sum(mask_true) + K.sum(mask_pred) + K.epsilon())


def dice_2(y):
    """
    Computes the Sorensen-Dice metric, where P come from class 1,2,3,4,5
                    TP
        Dice = 2 -------
                  T + P
    Parameters
    ----------
    y_true : keras.placeholder
        Placeholder that contains the ground truth labels of the classes
    y_pred : keras.placeholder
        Placeholder that contains the class prediction
    Returns
    -------
    scalar
        Dice metric
    """
    y_true, y_pred = y

    y_pred_decision = tf.floor(y_pred / K.max(y_pred, axis=4, keepdims=True))

    mask_true = y_true[:, :, :, :, 2]
    mask_pred = y_pred_decision[:, :, :, :, 2]

    y_sum = K.sum(mask_true * mask_pred)

    return (2. * y_sum + K.epsilon()) / (K.sum(mask_true) + K.sum(mask_pred) + K.epsilon())


def dice_3(y):
    """
    Computes the Sorensen-Dice metric, where P come from class 1,2,3,4,0
                    TP
        Dice = 2 -------
                  T + P
    Parameters
    ----------
    y_true : keras.placeholder
        Placeholder that contains the ground truth labels of the classes
    y_pred : keras.placeholder
        Placeholder that contains the class prediction
    Returns
    -------
    scalar
        Dice metric
    """
    y_true, y_pred = y

    y_pred_decision = tf.floor(y_pred / K.max(y_pred, axis=4, keepdims=True))

    mask_true = y_true[:, :, :, :, 3]
    mask_pred = y_pred_decision[:, :, :, :, 3]

    y_sum = K.sum(mask_true * mask_pred)

    return (2. * y_sum + K.epsilon()) / (K.sum(mask_true) + K.sum(mask_pred) + K.epsilon())


def dice_coef(y_true, y_pred):
    y_pred_decision = tf.floor(y_pred / K.max(y_pred, axis=4, keepdims=True))

    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred_decision)

    intersection = K.sum(y_true_f * y_pred_f)

    return (2. * intersection + K.epsilon()) / (K.sum(y_true_f) + K.sum(y_pred_f) + K.epsilon())
def dice_core(y_true, y_pred):
    """
    Computes the Sorensen-Dice metric, where P come from class 1,2,3,4,5
                    TP
        Dice = 2 -------
                  T + P
    Parameters
    ----------
    y_true : keras.placeholder
        Placeholder that contains the ground truth labels of the classes
    y_pred : keras.placeholder
        Placeholder that contains the class prediction
    Returns
    -------
    scalar
        Dice metric
    """
    y_pred_decision = tf.floor((y_pred + K.epsilon()) / K.max(y_pred, axis=4, keepdims=True))

    mask_true1 = y_true[:, :, :, :, 3:]
    mask_true2 = y_true[:, :, :, :, 1:2]
    mask_true = K.sum(K.concatenate([mask_true1, mask_true2], axis=4), axis=4)

    mask_pred1 = y_pred_decision[:, :, :, :, 3:]
    mask_pred2 = y_pred_decision[:, :, :, :, 1:2]
    mask_pred = K.sum(K.concatenate([mask_pred1, mask_pred2], axis=4), axis=4)

    y_sum = K.sum(mask_true * mask_pred)

    return (2. * y_sum + K.epsilon()) / (K.sum(mask_true) + K.sum(mask_pred) + K.epsilon())


def dice_enhance(y_true, y_pred):
    """
    Computes the Sorensen-Dice metric, where P come from class 1,2,3,4,5
                    TP
        Dice = 2 -------
                  T + P
    Parameters
    ----------
    y_true : keras.placeholder
        Placeholder that contains the ground truth labels of the classes
    y_pred : keras.placeholder
        Placeholder that contains the class prediction
    Returns
    -------
    scalar
        Dice metric
    """
    y_pred_decision = tf.floor((y_pred + K.epsilon()) / K.max(y_pred, axis=4, keepdims=True))

    mask_true = y_true[:, :, :, :, 3]
    mask_pred = y_pred_decision[:, :, :, :, 3]

    y_sum = K.sum(mask_true * mask_pred)

    return (2. * y_sum + K.epsilon()) / (K.sum(mask_true) + K.sum(mask_pred) + K.epsilon())


# def accuracy_survival(y_true, y_predicted):
def dice_whole_mod(y_true, y_pred):
    """
    Computes the Sorensen-Dice metric, where P come from class 1,2,3,4,0
                    TP
        Dice = 2 -------
                  T + P
    Parameters
    ----------
    y_true : keras.placeholder
        Placeholder that contains the ground truth labels of the classes
    y_pred : keras.placeholder
        Placeholder that contains the class prediction
    Returns
    -------
    scalar
        Dice metric
    """
    # mask = K.expand_dims(K.sum(y_true, axis=4), axis=4)
    # cmp_mask = K.concatenate([K.ones_like(mask) - mask, K.zeros_like(mask), K.zeros_like(mask)], axis=4)
    # y_pred = y_pred + cmp_mask

    y_true = y_true[:, :, :, :, :3]
    y_pred_decision = tf.floor((y_pred + K.epsilon()) / K.max(y_pred, axis=4, keepdims=True))

    mask_true = K.sum(y_true, axis=4)
    mask_pred = K.sum(y_pred_decision, axis=4) * K.sum(y_true, axis=4)

    y_sum = K.sum(mask_true * mask_pred)

    return (2. * y_sum + K.epsilon()) / (K.sum(mask_true) + K.sum(mask_pred) + K.epsilon())


def dice_core_mod(y_true, y_pred):
    """
    Computes the Sorensen-Dice metric, where P come from class 1,2,3,4,5
                    TP
        Dice = 2 -------
                  T + P
    Parameters
    ----------
    y_true : keras.placeholder
        Placeholder that contains the ground truth labels of the classes
    y_pred : keras.placeholder
        Placeholder that contains the class prediction
    Returns
    -------
    scalar
        Dice metric
    """
    y_true = y_true[:, :, :, :, :3]
    y_pred_decision = tf.floor((y_pred + K.epsilon()) / K.max(y_pred, axis=4, keepdims=True))
    y_pred_decision = tf.where(tf.is_nan(y_pred_decision), tf.zeros_like(y_pred_decision), y_pred_decision)

    mask_true1 = K.expand_dims(y_true[:, :, :, :, 2], axis=4)
    mask_true2 = K.expand_dims(y_true[:, :, :, :, 0], axis=4)
    mask_true = K.sum(K.concatenate([mask_true1, mask_true2], axis=4), axis=4)

    mask_pred1 = K.expand_dims(y_pred_decision[:, :, :, :, 2], axis=4)
    mask_pred2 = K.expand_dims(y_pred_decision[:, :, :, :, 0], axis=4)
    mask_pred = K.sum(K.concatenate([mask_pred1, mask_pred2], axis=4), axis=4) * K.sum(y_true, axis=4)

    y_sum = K.sum(mask_true * mask_pred)

    return (2. * y_sum + K.epsilon()) / (K.sum(mask_true) + K.sum(mask_pred) + K.epsilon())


def dice_enhance_mod(y_true, y_pred):
    """
    Computes the Sorensen-Dice metric, where P come from class 1,2,3,4,5
                    TP
        Dice = 2 -------
                  T + P
    Parameters
    ----------
    y_true : keras.placeholder
        Placeholder that contains the ground truth labels of the classes
    y_pred : keras.placeholder
        Placeholder that contains the class prediction
    Returns
    -------
    scalar
        Dice metric
    """
    y_true = y_true[:, :, :, :, :3]
    y_pred_decision = tf.floor((y_pred + K.epsilon()) / K.max(y_pred, axis=4, keepdims=True))
    # y_pred_decision = tf.where(tf.is_nan(y_pred_decision), tf.zeros_like(y_pred_decision), y_pred_decision)

    mask_true = y_true[:, :, :, :, 2]
    mask_pred = y_pred_decision[:, :, :, :, 2] * K.sum(y_true, axis=4)

    y_sum = K.sum(mask_true * mask_pred)

    return (2. * y_sum + K.epsilon()) / (K.sum(mask_true) + K.sum(mask_pred) + K.epsilon())
def dice_1(y_true, y_pred):
    """
    Computes the Sorensen-Dice metric, where P come from class 1,2,3,4,5
                    TP
        Dice = 2 -------
                  T + P
    Parameters
    ----------
    y_true : keras.placeholder
        Placeholder that contains the ground truth labels of the classes
    y_pred : keras.placeholder
        Placeholder that contains the class prediction
    Returns
    -------
    scalar
        Dice metric
    """
    y_pred_decision = tf.floor(y_pred / K.max(y_pred, axis=4, keepdims=True))

    mask_true = y_true[:, :, :, :, 1]
    mask_pred = y_pred_decision[:, :, :, :, 1]

    y_sum = K.sum(mask_true * mask_pred)

    return (2. * y_sum + K.epsilon()) / (K.sum(mask_true) + K.sum(mask_pred) + K.epsilon())


def dice_1_2D(y_true, y_pred):
    """
    Computes the Sorensen-Dice metric, where P come from class 1,2,3,4,5
                    TP
        Dice = 2 -------
                  T + P
    Parameters
    ----------
    y_true : keras.placeholder
        Placeholder that contains the ground truth labels of the classes
    y_pred : keras.placeholder
        Placeholder that contains the class prediction
    Returns
    -------
    scalar
        Dice metric
    """
    y_pred_decision = tf.floor((y_pred + K.epsilon()) / K.max(y_pred, axis=2, keepdims=True))

    mask_true = y_true[:, :, 1]
    mask_pred = y_pred_decision[:, :, 1]

    y_sum = K.sum(mask_true * mask_pred)

    return (2. * y_sum + K.epsilon()) / (K.sum(mask_true) + K.sum(mask_pred) + K.epsilon())


def dice_2(y_true, y_pred):
    """
    Computes the Sorensen-Dice metric, where P come from class 1,2,3,4,5
                    TP
        Dice = 2 -------
                  T + P
    Parameters
    ----------
    y_true : keras.placeholder
        Placeholder that contains the ground truth labels of the classes
    y_pred : keras.placeholder
        Placeholder that contains the class prediction
    Returns
    -------
    scalar
        Dice metric
    """
    y_pred_decision = tf.floor((y_pred + K.epsilon()) / K.max(y_pred, axis=4, keepdims=True))

    mask_true = y_true[:, :, :, :, 2]
    mask_pred = y_pred_decision[:, :, :, :, 2]

    y_sum = K.sum(mask_true * mask_pred)

    return (2. * y_sum + K.epsilon()) / (K.sum(mask_true) + K.sum(mask_pred) + K.epsilon())


def dice_3(y_true, y_pred):
    """
    Computes the Sorensen-Dice metric, where P come from class 1,2,3,4,0
                    TP
        Dice = 2 -------
                  T + P
    Parameters
    ----------
    y_true : keras.placeholder
        Placeholder that contains the ground truth labels of the classes
    y_pred : keras.placeholder
        Placeholder that contains the class prediction
    Returns
    -------
    scalar
        Dice metric
    """
    y_pred_decision = tf.floor((y_pred + K.epsilon()) / K.max(y_pred, axis=4, keepdims=True))

    mask_true = y_true[:, :, :, :, 3]
    mask_pred = y_pred_decision[:, :, :, :, 3]

    y_sum = K.sum(mask_true * mask_pred)

    return (2. * y_sum + K.epsilon()) / (K.sum(mask_true) + K.sum(mask_pred) + K.epsilon())


def dice_4(y_true, y_pred):
    """
    Computes the Sorensen-Dice metric, where P come from class 1,2,3,4,5
                    TP
        Dice = 2 -------
                  T + P
    Parameters
    ----------
    y_true : keras.placeholder
        Placeholder that contains the ground truth labels of the classes
    y_pred : keras.placeholder
        Placeholder that contains the class prediction
    Returns
    -------
    scalar
        Dice metric
    """
    y_pred_decision = tf.floor((y_pred + K.epsilon()) / K.max(y_pred, axis=4, keepdims=True))

    mask_true = y_true[:, :, :, :, 4]
    mask_pred = y_pred_decision[:, :, :, :, 4]

    y_sum = K.sum(mask_true * mask_pred)

    return (2. * y_sum + K.epsilon()) / (K.sum(mask_true) + K.sum(mask_pred) + K.epsilon())
def precision_1(y_true, y_pred):
    y_pred_decision = tf.floor(y_pred / K.max(y_pred, axis=4, keepdims=True))

    mask_true = y_true[:, :, :, :, 1]
    mask_pred = y_pred_decision[:, :, :, :, 1]

    y_sum = K.sum(mask_true * mask_pred)

    return (y_sum + K.epsilon()) / (K.sum(mask_pred) + K.epsilon())


def precision_2(y_true, y_pred):
    y_pred_decision = tf.floor(y_pred / K.max(y_pred, axis=4, keepdims=True))

    mask_true = y_true[:, :, :, :, 2]
    mask_pred = y_pred_decision[:, :, :, :, 2]

    y_sum = K.sum(mask_true * mask_pred)

    return (y_sum + K.epsilon()) / (K.sum(mask_pred) + K.epsilon())


def precision_3(y_true, y_pred):
    y_pred_decision = tf.floor(y_pred / K.max(y_pred, axis=4, keepdims=True))

    mask_true = y_true[:, :, :, :, 3]
    mask_pred = y_pred_decision[:, :, :, :, 3]

    y_sum = K.sum(mask_true * mask_pred)

    return (y_sum + K.epsilon()) / (K.sum(mask_pred) + K.epsilon())


def precision_4(y_true, y_pred):
    y_pred_decision = tf.floor(y_pred / K.max(y_pred, axis=4, keepdims=True))

    mask_true = y_true[:, :, :, :, 4]
    mask_pred = y_pred_decision[:, :, :, :, 4]

    y_sum = K.sum(mask_true * mask_pred)

    return (y_sum + K.epsilon()) / (K.sum(mask_pred) + K.epsilon())


def recall_0(y_true, y_pred):
    y_pred_decision = tf.floor(y_pred / K.max(y_pred, axis=4, keepdims=True))

    mask_true = y_true[:, :, :, :, 0]
    mask_pred = y_pred_decision[:, :, :, :, 0]

    y_sum = K.sum(mask_true * mask_pred)

    return (y_sum + K.epsilon()) / (K.sum(mask_true) + K.epsilon())


def recall_2(y_true, y_pred):
    y_pred_decision = tf.floor(y_pred / K.max(y_pred, axis=4, keepdims=True))

    mask_true = y_true[:, :, :, :, 2]
    mask_pred = y_pred_decision[:, :, :, :, 2]

    y_sum = K.sum(mask_true * mask_pred)

    return (y_sum + K.epsilon()) / (K.sum(mask_true) + K.epsilon())


def recall_3(y_true, y_pred):
    y_pred_decision = tf.floor(y_pred / K.max(y_pred, axis=4, keepdims=True))

    mask_true = y_true[:, :, :, :, 3]
    mask_pred = y_pred_decision[:, :, :, :, 3]

    y_sum = K.sum(mask_true * mask_pred)

    return (y_sum + K.epsilon()) / (K.sum(mask_true) + K.epsilon())


def recall_4(y_true, y_pred):
    y_pred_decision = tf.floor(y_pred / K.max(y_pred, axis=4, keepdims=True))

    mask_true = y_true[:, :, :, :, 4]
    mask_pred = y_pred_decision[:, :, :, :, 4]

    y_sum = K.sum(mask_true * mask_pred)

    return (y_sum + K.epsilon()) / (K.sum(mask_true) + K.epsilon())


# -------------------------- Masked metrics --------------------------------
def _dropout(values, recurrent_noise, keep_prob):

    def dropout(index, value, noise):
        random_tensor = keep_prob + noise
        binary_tensor = tf.floor(random_tensor)
        ret = tf.div(value, keep_prob) * binary_tensor
        ret.set_shape(value.get_shape())
        return ret

    return DropoutGRUCell._enumerated_map_structure(dropout, values, recurrent_noise)
def normalize(tensor):
    """
    Squeeze the given Tensor into a range between 0 and 1.

    :param Tensor tensor: An image tensor.
    :return: Tensor
    """
    floor = tf.reduce_min(tensor)
    ceil = tf.reduce_max(tensor)

    return (tensor - floor) / (ceil - floor)
def _apply_dropout_mask(tensor_shape, keep_prob=1.0, normalize=True):
    random_tensor = keep_prob + tf.random_uniform(tensor_shape, dtype=tf.float32)
    binary_mask = tf.floor(random_tensor)
    if normalize:
        binary_mask = tf.reciprocal(keep_prob) * binary_mask
    return binary_mask
def sample(probs):
    # Takes in a vector of probabilities, and returns a random vector of 0s and 1s
    # sampled from the input vector
    return tf.floor(probs + tf.random_uniform(tf.shape(probs), 0, 1))


# This function runs the gibbs chain. We will call this function in two places:
#  - When we define the training update step
#  - When we sample our music segments from the trained RBM
def sample(probs):
    # Takes in a vector of probabilities, and returns a random vector of 0s and 1s
    # sampled from the input vector
    return tf.floor(probs + tf.random_uniform(tf.shape(probs), 0, 1))


# This function runs the gibbs chain. We will call this function in two places:
#  - When we define the training update step
#  - When we sample our music segments from the trained RBM
def drop_path(net, keep_prob, is_training=True):
    """Drops out a whole example hiddenstate with the specified probability."""
    if is_training:
        batch_size = tf.shape(net)[0]
        noise_shape = [batch_size, 1, 1, 1]
        random_tensor = keep_prob
        random_tensor += tf.random_uniform(noise_shape, dtype=tf.float32)
        binary_tensor = tf.floor(random_tensor)
        net = tf.div(net, keep_prob) * binary_tensor
    return net
def setUp(self):
    super(CoreUnaryOpsTest, self).setUp()

    self.ops = [
        ('abs', operator.abs, tf.abs, core.abs_function),
        ('neg', operator.neg, tf.neg, core.neg),
        # TODO(shoyer): add unary + to core TensorFlow
        ('pos', None, None, None),
        ('sign', None, tf.sign, core.sign),
        ('reciprocal', None, tf.reciprocal, core.reciprocal),
        ('square', None, tf.square, core.square),
        ('round', None, tf.round, core.round_function),
        ('sqrt', None, tf.sqrt, core.sqrt),
        ('rsqrt', None, tf.rsqrt, core.rsqrt),
        ('log', None, tf.log, core.log),
        ('exp', None, tf.exp, core.exp),
        ('log', None, tf.log, core.log),
        ('ceil', None, tf.ceil, core.ceil),
        ('floor', None, tf.floor, core.floor),
        ('cos', None, tf.cos, core.cos),
        ('sin', None, tf.sin, core.sin),
        ('tan', None, tf.tan, core.tan),
        ('acos', None, tf.acos, core.acos),
        ('asin', None, tf.asin, core.asin),
        ('atan', None, tf.atan, core.atan),
        ('lgamma', None, tf.lgamma, core.lgamma),
        ('digamma', None, tf.digamma, core.digamma),
        ('erf', None, tf.erf, core.erf),
        ('erfc', None, tf.erfc, core.erfc),
        ('lgamma', None, tf.lgamma, core.lgamma),
    ]
    total_size = np.prod([v.size for v in self.original_lt.axes.values()])
    self.test_lt = core.LabeledTensor(
        tf.cast(self.original_lt, tf.float32) / total_size,
        self.original_lt.axes)
def _setup_net(self):
    with tf.variable_scope('bbox'):
        inputs_0 = tf.Variable(trainable=False,
                               validate_shape=(None, self.config.size[0], self.config.size[1], 3))
        self.bbox_infer_model._setup_input(inputs_0)
        assign_op = tf.assign(inputs_0, self.data_batches)
        with tf.control_dependencies([assign_op]):
            self.bbox_infer_model._setup_net()

    def crop_bbox(width, height, input, bbox):
        expand_rate = 0.1
        top = tf.maximum(tf.floor(bbox[1] * height - height * expand_rate), 0)
        bottom = tf.minimum(tf.floor((bbox[1] + bbox[3]) * height + height * expand_rate), height)
        left = tf.maximum(tf.floor(bbox[0] * width - width * expand_rate), 0)
        right = tf.minimum((tf.floor(bbox[0] + bbox[2]) * width + width * expand_rate), width)
        top = tf.cond(top >= bottom, lambda: tf.identity(0), lambda: tf.identity(top))
        bottom = tf.cond(top >= bottom, lambda: tf.identity(height), lambda: tf.identity(bottom))
        left = tf.cond(left >= right, lambda: tf.identity(0), lambda: tf.identity(left))
        right = tf.cond(left >= right, lambda: tf.identity(width), lambda: tf.identity(right))
        return input[top:bottom, left:right, :]

    with tf.variable_scope('nsr'):
        origin_width, origin_height = 512, 512
        inputs_1 = tf.Variable(trainable=False,
                               validate_shape=(None, self.config.size[0], self.config.size[1], 3))
        self.infer_model._setup_input(inputs_1)
        inputs = self.bbox_infer_model.inputs
        bboxes = self.bbox_infer_model.model_output
        inputs = tf.stack([crop_bbox(origin_width, origin_height, inputs[i], bboxes[i])
                           for i in range(self.config.batch_size)])
        inputs = tf.image.resize_images(inputs, self.config.size)
        assign_op = tf.assign(inputs_1, inputs)
        with tf.control_dependencies([assign_op]):
            self.infer_model._setup_net()

    vars_dict = self._vars()
    assign_ops = assign_vars(vars_dict, self.bbox_vars_dict, 'bbox')
    assign_ops.extend(assign_vars(vars_dict, self.vars_dict, 'nsr'))
    with tf.control_dependencies(assign_ops):
        self.output = stack_output(self.max_number_length, self.length_output, self.numbers_output)
def _build_graph(self):
    with tf.variable_scope("generator") as scope:
        print("### Print Generator Intermediate Parameter")
        self.prior = tf.placeholder(dtype=tf.float32, shape=(None, 100), name="prior_gen")
        self.is_training = tf.placeholder(dtype=tf.bool, shape=(), name="training_flag")

        prior_proj = tf.contrib.layers.fully_connected(inputs=self.prior, num_outputs=4 * 4 * 1024,
                                                       activation_fn=None, scope="prior_projection")
        prior_proj = tf.contrib.layers.batch_norm(inputs=prior_proj, center=True, scale=True,
                                                  activation_fn=tf.nn.leaky_relu,
                                                  is_training=self.is_training, scope="bn0")
        conv0 = tf.reshape(prior_proj, (-1, 4, 4, 1024))

        conv1 = tf.contrib.layers.convolution2d_transpose(inputs=conv0, num_outputs=512,
                                                          activation_fn=None, kernel_size=(5, 5),
                                                          stride=(2, 2), padding="SAME", scope="deconv1")
        conv1 = tf.contrib.layers.batch_norm(inputs=conv1, center=True, scale=True,
                                             activation_fn=tf.nn.leaky_relu,
                                             is_training=self.is_training, scope="bn1")
        print(conv1.shape)

        conv2 = tf.contrib.layers.convolution2d_transpose(inputs=conv1, num_outputs=256,
                                                          activation_fn=None, kernel_size=(5, 5),
                                                          stride=(2, 2), padding="SAME", scope="deconv2")
        conv2 = tf.contrib.layers.batch_norm(inputs=conv2, center=True, scale=True,
                                             activation_fn=tf.nn.leaky_relu,
                                             is_training=self.is_training, scope="bn2")
        print(conv2.shape)

        conv3 = tf.contrib.layers.convolution2d_transpose(inputs=conv2, num_outputs=128,
                                                          activation_fn=None, kernel_size=(5, 5),
                                                          stride=(2, 2), padding="SAME", scope="deconv3")
        conv3 = tf.contrib.layers.batch_norm(inputs=conv3, center=True, scale=True,
                                             activation_fn=tf.nn.leaky_relu,
                                             is_training=self.is_training, scope="bn3")
        print(conv3.shape)

        conv4 = tf.contrib.layers.convolution2d_transpose(inputs=conv3, num_outputs=3,
                                                          activation_fn=None, kernel_size=(5, 5),
                                                          stride=(2, 2), padding="SAME", scope="deconv4")
        self.gen_img = tf.nn.tanh(conv4)
        self.gen_img_out = tf.cast(x=tf.floor(self.gen_img * 128.0 + 128.0), dtype=tf.int32)
        print(conv4.shape)
        print("### End Print Generator Intermediate Parameter")


# tf.reset_default_graph()
# g = Generator()
def to_unpacked_coordinates(ix, l, bound):
    ix = tf.cast(ix, tf.int32)
    # You can actually compute the lens in closed form:
    # lens = tf.floor(0.5 * (-tf.sqrt(4 * tf.square(l) + 4 * l - 8 * ix + 1) + 2 * l + 1))
    # but it is very ugly and rounding errors could cause problems, so this approach seems safer
    lens = []
    for i in range(bound):
        lens.append(tf.fill((l - i,), i))
    lens = tf.concat(lens, axis=0)
    lens = tf.gather(lens, ix)
    answer_start = ix - l * lens + lens * (lens - 1) // 2
    return tf.stack([answer_start, answer_start + lens], axis=1)
def dk_mod(x, y):
    """Differentiable mod, Donald Knuth style

    Args
        x: first argument
        y: second argument
    Returns
        mod between x and y
    """
    return x - y * tf.floor(x / y)


# Register the gradient for the mod operation. tf.mod() does not have a gradient implemented.
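A small worked check (my own, not from the project) of the floored definition for a negative first argument, where it agrees with Python's % operator:

# -7 - 3 * floor(-7 / 3) = -7 - 3 * (-3) = 2, the same as Python's -7 % 3.
a = tf.constant(-7.0)
b = tf.constant(3.0)
with tf.Session() as sess:
    print(sess.run(dk_mod(a, b)))  # 2.0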
def discretized_logistic(mean, logscale, binsize=1 / 256.0, sample=None):
    scale = tf.exp(logscale)
    sample = (tf.floor(sample / binsize) * binsize - mean) / scale
    logp = tf.log(tf.sigmoid(sample + binsize / scale) - tf.sigmoid(sample) + 1e-7)
    return tf.reduce_sum(logp, [1, 2, 3])
def floor(x):
    return tf.floor(x)
def test_sample(self):
    import numpy as np
    h, w = 3, 4

    def np_sample(img, coords):
        # a reference implementation
        coords = np.maximum(coords, 0)
        coords = np.minimum(coords, np.array([img.shape[1] - 1, img.shape[2] - 1]))
        xs = coords[:, :, :, 1].reshape((img.shape[0], -1))
        ys = coords[:, :, :, 0].reshape((img.shape[0], -1))

        ret = np.zeros((img.shape[0], coords.shape[1], coords.shape[2], img.shape[3]),
                       dtype='float32')
        for k in range(img.shape[0]):
            xss, yss = xs[k], ys[k]
            ret[k, :, :, :] = img[k, yss, xss, :].reshape((coords.shape[1], coords.shape[2], 3))
        return ret

    bimg = np.random.rand(2, h, w, 3).astype('float32')

    # mat = np.array([
    #     [[[1, 1], [1.2, 1.2]], [[-1, -1], [2.5, 2.5]]],
    #     [[[1, 1], [1.2, 1.2]], [[-1, -1], [2.5, 2.5]]]
    # ], dtype='float32')  # 2x2x2x2
    mat = (np.random.rand(2, 5, 5, 2) - 0.2) * np.array([h + 3, w + 3])
    true_res = np_sample(bimg, np.floor(mat + 0.5).astype('int32'))

    inp, mapping = self.make_variable(bimg, mat)
    output = sample(inp, tf.cast(tf.floor(mapping + 0.5), tf.int32))
    res = self.run_variable(output)
    self.assertTrue((res == true_res).all())