We extracted the following 50 code examples from open-source Python projects to illustrate how to use tensorflow.scalar_summary().
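Note that tf.scalar_summary() is the pre-1.0 TensorFlow API. In TensorFlow 1.0 it was renamed to tf.summary.scalar() (likewise tf.histogram_summary() became tf.summary.histogram(), tf.merge_all_summaries() became tf.summary.merge_all(), and tf.train.SummaryWriter became tf.summary.FileWriter). Before the individual examples, here is a minimal, self-contained sketch of the typical workflow; the placeholder name 'err' and the 'logs' directory are illustrative choices, not taken from any project below:

import tensorflow as tf

# Minimal sketch of the classic scalar-summary workflow (TF <= 0.12 API).
# 'err' and 'logs' are illustrative names, not from the examples below.
err = tf.placeholder(tf.float32, shape=(), name='err')
loss_summary = tf.scalar_summary('loss', err)  # tf.summary.scalar('loss', err) in TF >= 1.0
merged = tf.merge_all_summaries()              # tf.summary.merge_all() in TF >= 1.0

with tf.Session() as sess:
    writer = tf.train.SummaryWriter('logs', sess.graph)  # tf.summary.FileWriter in TF >= 1.0
    for step in range(10):
        summary_str = sess.run(merged, feed_dict={err: 1.0 / (step + 1)})
        writer.add_summary(summary_str, step)
    writer.close()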
def build_model(self):
    self.q = tf.placeholder(tf.float32, [self.reader.vocab_size], name="question")
    self.a = tf.placeholder(tf.float32, [self.reader.vocab_size], name="answer")

    self.build_encoder()
    self.build_decoder()

    # Kullback-Leibler divergence
    self.e_loss = -0.5 * tf.reduce_sum(1 + self.log_sigma_sq - tf.square(self.mu) - tf.exp(self.log_sigma_sq))
    # Log likelihood
    self.g_loss = tf.reduce_sum(tf.log(self.p_x_i))

    self.loss = tf.reduce_mean(self.e_loss + self.g_loss)
    self.optim = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(-self.loss)

    _ = tf.scalar_summary("encoder loss", self.e_loss)
    _ = tf.scalar_summary("decoder loss", self.g_loss)
    _ = tf.scalar_summary("loss", self.loss)
def _activation_summary(x):
    """Helper to create summaries for activations.

    Creates a summary that provides a histogram of activations.
    Creates a summary that measures the sparsity of activations.

    Args:
        x: Tensor
    Returns:
        nothing
    """
    # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
    # session. This helps the clarity of presentation on TensorBoard.
    tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
    # tf.histogram_summary(tensor_name + '/activations', x)
    tf.summary.histogram(tensor_name + '/activations', x)
    # tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
    tf.summary.scalar(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
def get_loss(pred, label, end_points, reg_weight=0.001):
    """pred: BxNxC, label: BxN"""
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=label)
    classify_loss = tf.reduce_mean(loss)
    tf.scalar_summary('classify loss', classify_loss)

    # Enforce the transformation to be close to an orthogonal matrix
    transform = end_points['transform']  # BxKxK
    K = transform.get_shape()[1].value
    mat_diff = tf.matmul(transform, tf.transpose(transform, perm=[0, 2, 1]))
    mat_diff -= tf.constant(np.eye(K), dtype=tf.float32)
    mat_diff_loss = tf.nn.l2_loss(mat_diff)
    tf.scalar_summary('mat_loss', mat_diff_loss)

    return classify_loss + mat_diff_loss * reg_weight
def define_summaries(self):
    '''Helper function for init_opt'''
    all_sum = {'g': [], 'd': [], 'hr_g': [], 'hr_d': [], 'hist': []}
    for k, v in self.log_vars:
        if k.startswith('g'):
            all_sum['g'].append(tf.scalar_summary(k, v))
        elif k.startswith('d'):
            all_sum['d'].append(tf.scalar_summary(k, v))
        elif k.startswith('hr_g'):
            all_sum['hr_g'].append(tf.scalar_summary(k, v))
        elif k.startswith('hr_d'):
            all_sum['hr_d'].append(tf.scalar_summary(k, v))
        elif k.startswith('hist'):
            all_sum['hist'].append(tf.histogram_summary(k, v))

    self.g_sum = tf.merge_summary(all_sum['g'])
    self.d_sum = tf.merge_summary(all_sum['d'])
    self.hr_g_sum = tf.merge_summary(all_sum['hr_g'])
    self.hr_d_sum = tf.merge_summary(all_sum['hr_d'])
    self.hist_sum = tf.merge_summary(all_sum['hist'])
def compute_cost(self):
    losses = tf.nn.seq2seq.sequence_loss_by_example(
        [tf.reshape(self.pred, [-1], name='reshape_pred')],
        [tf.reshape(self.ys, [-1], name='reshape_target')],
        [tf.ones([self.batch_size * self.n_steps], dtype=tf.float32)],
        average_across_timesteps=True,
        softmax_loss_function=self.ms_error,
        name='losses'
    )
    with tf.name_scope('average_cost'):
        self.cost = tf.div(
            tf.reduce_sum(losses, name='losses_sum'),
            self.batch_size,
            name='average_cost')
    tf.scalar_summary('cost', self.cost)
def _gan_loss(self, logits_real, logits_fake, feature_real, feature_fake, use_features=False):
    discriminator_loss_real = self._cross_entropy_loss(logits_real, tf.ones_like(logits_real),
                                                       name="disc_real_loss")
    discriminator_loss_fake = self._cross_entropy_loss(logits_fake, tf.zeros_like(logits_fake),
                                                       name="disc_fake_loss")
    self.discriminator_loss = discriminator_loss_fake + discriminator_loss_real

    gen_loss_disc = self._cross_entropy_loss(logits_fake, tf.ones_like(logits_fake), name="gen_disc_loss")
    if use_features:
        gen_loss_features = tf.reduce_mean(tf.nn.l2_loss(feature_real - feature_fake)) / (self.crop_image_size ** 2)
    else:
        gen_loss_features = 0
    self.gen_loss = gen_loss_disc + 0.1 * gen_loss_features

    tf.scalar_summary("Discriminator_loss", self.discriminator_loss)
    tf.scalar_summary("Generator_loss", self.gen_loss)
def _add_loss_summaries(total_loss):
    """Add summaries for losses in CIFAR-10 model.

    Generates moving average for all losses and associated summaries for
    visualizing the performance of the network.

    Args:
        total_loss: Total loss from loss().
    Returns:
        loss_averages_op: op for generating moving averages of losses.
    """
    # Compute the moving average of all individual losses and the total loss.
    loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
    losses = tf.get_collection('losses')
    loss_averages_op = loss_averages.apply(losses + [total_loss])

    # Attach a scalar summary to all individual losses and the total loss; do
    # the same for the averaged version of the losses.
    for l in losses + [total_loss]:
        # Name each loss as '(raw)' and name the moving average version of the
        # loss as the original loss name.
        tf.scalar_summary(l.op.name + ' (raw)', l)
        tf.scalar_summary(l.op.name, loss_averages.average(l))

    return loss_averages_op
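This moving-average pattern recurs in several examples below. The returned loss_averages_op is typically attached to the training step through a control dependency, so the averages are refreshed on every iteration. A hedged sketch, where total_loss and the optimizer settings are illustrative assumptions rather than code from any one project:

# Illustrative usage of _add_loss_summaries() as defined above.
loss_averages_op = _add_loss_summaries(total_loss)

# Update the loss moving averages before computing and applying gradients.
with tf.control_dependencies([loss_averages_op]):
    opt = tf.train.GradientDescentOptimizer(0.1)  # illustrative optimizer and learning rate
    train_op = opt.minimize(total_loss)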
def policy_gradient():
    with tf.variable_scope("policy"):
        params = tf.get_variable("policy_parameters", [4, 2])
        state = tf.placeholder("float", [None, 4])
        actions = tf.placeholder("float", [None, 2])
        advantages = tf.placeholder("float", [None, 1])
        reward_input = tf.placeholder("float")
        # Note: the next assignment rebinds the Python name, so the summary
        # below tracks the reward_input placeholder, not the variable here.
        episode_reward = tf.get_variable("episode_reward", initializer=tf.constant(0.))
        episode_reward = reward_input
        linear = tf.matmul(state, params)
        probabilities = tf.nn.softmax(linear)
        good_probabilities = tf.reduce_sum(tf.mul(probabilities, actions), reduction_indices=[1])
        eligibility = tf.log(good_probabilities) * advantages
        loss = -tf.reduce_sum(eligibility)
        optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss)
        tf.scalar_summary("loss", loss)
        tf.scalar_summary("episode_reward", episode_reward)
        return probabilities, state, actions, advantages, optimizer, reward_input, episode_reward
def _activation_summary(x):
    """Helper to create summaries for activations.

    Creates a summary that provides a histogram of activations.
    Creates a summary that measures the sparsity of activations.

    Args:
        x: Tensor
    Returns:
        nothing
    """
    # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
    # session. This helps the clarity of presentation on TensorBoard.
    tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
    tf.histogram_summary(tensor_name + '/activations', x)
    tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
def _add_loss_summaries(total_loss):
    """Add summaries for losses.

    Generates moving average for all losses and associated summaries for
    visualizing the performance of the network.

    Args:
        total_loss: Total loss from loss().
    Returns:
        loss_averages_op: op for generating moving averages of losses.
    """
    # Compute the moving average of all individual losses and the total loss.
    loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
    losses = tf.get_collection('losses')
    loss_averages_op = loss_averages.apply(losses + [total_loss])

    # Attach a scalar summary to all individual losses and the total loss; do
    # the same for the averaged version of the losses.
    for l in losses + [total_loss]:
        # Name each loss as '(raw)' and name the moving average version of the
        # loss as the original loss name.
        tf.scalar_summary(l.op.name + ' (raw)', l)
        tf.scalar_summary(l.op.name, loss_averages.average(l))

    return loss_averages_op
def log(self, key, val, step_num):
    """Directly log a scalar value to the event file.

    :param string key: a name for the value
    :param val: a float
    :param step_num: the iteration number at which this value was logged
    """
    try:
        ph, summ = self.summaries[key]
    except KeyError:
        # if we haven't defined a variable for this key, define one
        with self.g.as_default():
            ph = tf.placeholder(tf.float32, (), name=key)  # scalar
            summ = tf.scalar_summary(key, ph)
        self.summaries[key] = (ph, summ)
    summary_str = self.sess.run(summ, {ph: val})
    self.summ_writer.add_summary(summary_str, step_num)
    return val
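A hedged sketch of how a helper like this is typically driven; logger and compute_validation_loss are hypothetical names standing in for an instance of the class above and some metric of interest:

# Hypothetical driver; 'logger' is an instance of the class defining log()
# above, and compute_validation_loss() is an illustrative stand-in.
for step in range(1000):
    val_loss = compute_validation_loss()
    logger.log('val_loss', val_loss, step)  # creates the summary op lazily on first use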
def build_model(self):
    self.inputs = tf.placeholder(tf.float32,
                                 [self.batch_size, self.input_size, self.input_size, 3],
                                 name='real_images')
    # self.inputs = tf.placeholder(tf.float32, [None, self.input_size, self.input_size, 3], name='real_images')
    try:
        self.up_inputs = tf.image.resize_images(self.inputs, self.image_shape[0], self.image_shape[1],
                                                tf.image.ResizeMethod.NEAREST_NEIGHBOR)
    except ValueError:
        # newer versions of tensorflow
        self.up_inputs = tf.image.resize_images(self.inputs, [self.image_shape[0], self.image_shape[1]],
                                                tf.image.ResizeMethod.NEAREST_NEIGHBOR)
    self.images = tf.placeholder(tf.float32, [self.batch_size] + self.image_shape, name='real_images')
    # self.images = tf.placeholder(tf.float32, [None] + self.image_shape, name='real_images')
    self.sample_images = tf.placeholder(tf.float32, [self.sample_size] + self.image_shape, name='sample_images')
    # self.sample_images = tf.placeholder(tf.float32, [None] + self.image_shape, name='sample_images')

    self.G = self.generator(self.inputs)
    self.G_sum = tf.image_summary("G", self.G)
    self.g_loss = tf.reduce_mean(tf.square(self.images - self.G))
    self.g_loss_sum = tf.scalar_summary("g_loss", self.g_loss)

    t_vars = tf.trainable_variables()
    self.g_vars = [var for var in t_vars if 'g_' in var.name]
    self.saver = tf.train.Saver()
def _init_summaries(self):
    if self.is_train:
        logdir = os.path.join(SUMMARY_PATH, self.log_name, 'train')

        self.summary_writer = tf.summary.FileWriter(logdir)
        self.summary_writer_by_points = [tf.summary.FileWriter(os.path.join(logdir, 'point_%02d' % i))
                                         for i in range(16)]

        tf.scalar_summary('Average euclidean distance', self.euclidean_dist,
                          collections=[KEY_SUMMARIES])
        for i in range(16):
            tf.scalar_summary('Joint euclidean distance', self.euclidean_dist_per_joint[i],
                              collections=[KEY_SUMMARIES_PER_JOINT[i]])

        self.create_summary_from_weights()

        self.ALL_SUMMARIES = tf.merge_all_summaries(KEY_SUMMARIES)
        self.SUMMARIES_PER_JOINT = [tf.merge_all_summaries(KEY_SUMMARIES_PER_JOINT[i]) for i in range(16)]
    else:
        logdir = os.path.join(SUMMARY_PATH, self.log_name, 'test')
        self.summary_writer = tf.summary.FileWriter(logdir)
def loss(self, inf_targets, inf_vads, targets, vads, mtl_fac):
    '''
    Loss definition.
    Only the speech-inference loss is defined, and it works quite well;
    add a VAD cross-entropy loss if you want.
    '''
    loss_v1 = tf.nn.l2_loss(inf_targets - targets) / self.batch_size
    loss_o = loss_v1
    reg_loss = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
    # ipdb.set_trace()
    loss_v = loss_o + tf.add_n(reg_loss)
    tf.scalar_summary('loss', loss_v)
    # loss_merge = tf.cond(
    #     is_val, lambda: tf.scalar_summary('val_loss_batch', loss_v),
    #     lambda: tf.scalar_summary('loss', loss_v))
    return loss_v, loss_o
    # return tf.reduce_mean(tf.nn.l2_loss(inf_targets - targets))
def _setup_training(self):
    """Set up a data flow graph for fine-tuning."""
    layer_num = self.layer_num
    act_func = ACTIVATE_FUNC[self.activate_func]
    sigma = self.sigma
    lr = self.learning_rate
    weights = self.weights
    biases = self.biases
    data1, data2 = self.data1, self.data2
    batch_size = self.batch_size
    optimizer = OPTIMIZER[self.optimizer]

    with tf.name_scope("training"):
        s1 = self._obtain_score(data1, weights, biases, act_func, "1")
        s2 = self._obtain_score(data2, weights, biases, act_func, "2")
        with tf.name_scope("cost"):
            sum_cost = tf.reduce_sum(tf.log(1 + tf.exp(-sigma * (s1 - s2))))
            self.cost = cost = sum_cost / batch_size
    self.optimize = optimizer(lr).minimize(cost)

    for n in range(layer_num - 1):
        tf.histogram_summary("weight" + str(n), weights[n])
        tf.histogram_summary("bias" + str(n), biases[n])
    tf.scalar_summary("cost", cost)
def add_evaluation_step(result_tensor, ground_truth_tensor):
    """Inserts the operations we need to evaluate the accuracy of our results.

    Args:
        result_tensor: The new final node that produces results.
        ground_truth_tensor: The node we feed ground truth data into.
    Returns:
        The evaluation step op.
    """
    with tf.name_scope('accuracy'):
        with tf.name_scope('correct_prediction'):
            correct_prediction = tf.equal(tf.argmax(result_tensor, 1),
                                          tf.argmax(ground_truth_tensor, 1))
        with tf.name_scope('accuracy'):
            evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    tf.scalar_summary('accuracy', evaluation_step)
    return evaluation_step
def build_summaries():
    episode_reward = tf.Variable(0.)
    scalar_summary("Reward", episode_reward)
    episode_ave_max_q = tf.Variable(0.)
    scalar_summary("Qmax Value", episode_ave_max_q)
    logged_epsilon = tf.Variable(0.)
    scalar_summary("Epsilon", logged_epsilon)

    # Threads shouldn't modify the main graph, so we use placeholders
    # to assign the value of every summary (instead of calling assign in
    # every thread, which would keep creating new ops in the graph).
    summary_vars = [episode_reward, episode_ave_max_q, logged_epsilon]
    summary_placeholders = [tf.placeholder("float") for i in range(len(summary_vars))]
    assign_ops = [summary_vars[i].assign(summary_placeholders[i]) for i in range(len(summary_vars))]
    summary_op = merge_all_summaries()
    return summary_placeholders, assign_ops, summary_op
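A hedged sketch of how a training loop typically drives this placeholder-and-assign pattern; sess, writer, episode_number, and the per-episode statistics are illustrative assumptions, not code from the project above:

# Illustrative driver for build_summaries(); names below are assumptions.
summary_placeholders, assign_ops, summary_op = build_summaries()
stats = [total_reward, max_q / float(steps), epsilon]  # per-episode values

for i in range(len(stats)):
    sess.run(assign_ops[i], feed_dict={summary_placeholders[i]: float(stats[i])})
summary_str = sess.run(summary_op)
writer.add_summary(summary_str, episode_number)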
def _add_loss_summaries(total_loss):
    """Add summaries for losses in CNN model.

    Generates moving average for all losses and associated summaries for
    visualizing the performance of the network.

    Args:
        total_loss: Total loss from loss().
    Returns:
        loss_averages_op: op for generating moving averages of losses.
    """
    # Compute the moving average of all individual losses and the total loss.
    loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
    losses = tf.get_collection('losses')
    loss_averages_op = loss_averages.apply(losses + [total_loss])

    # Attach a scalar summary to all individual losses and the total loss; do
    # the same for the averaged version of the losses.
    for l in losses + [total_loss]:
        # Name each loss as '(raw)' and name the moving average version of the
        # loss as the original loss name.
        tf.scalar_summary(l.op.name + ' (raw)', l)
        tf.scalar_summary(l.op.name, loss_averages.average(l))

    return loss_averages_op
def _activation_summary(self, x):
    """Helper to create summaries for activations.

    Creates a summary that provides a histogram of activations.
    Creates a summary that measures the sparsity of activations.

    Args:
        x: Tensor
    Returns:
        nothing
    """
    # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
    # session. This helps the clarity of presentation on TensorBoard.
    # Error: these summaries cause high classifier error!!!
    # All inputs to node MergeSummary/MergeSummary must be from the same frame.
    # tensor_name = re.sub('%s_[0-9]*/' % "tower", '', x.op.name)
    # tf.histogram_summary(tensor_name + '/activations', x)
    # tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
def _build_loss(self):
    config = self.config
    JX = tf.shape(self.x)[2]
    M = tf.shape(self.x)[1]
    JQ = tf.shape(self.q)[1]
    loss_mask = tf.reduce_max(tf.cast(self.q_mask, 'float'), 1)
    losses = tf.nn.softmax_cross_entropy_with_logits(
        self.logits, tf.cast(tf.reshape(self.y, [-1, M * JX]), 'float'))
    ce_loss = tf.reduce_mean(loss_mask * losses)
    tf.add_to_collection('losses', ce_loss)
    ce_loss2 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
        self.logits2, tf.cast(tf.reshape(self.y2, [-1, M * JX]), 'float')))
    tf.add_to_collection("losses", ce_loss2)

    self.loss = tf.add_n(tf.get_collection('losses', scope=self.scope), name='loss')
    tf.scalar_summary(self.loss.op.name, self.loss)
    tf.add_to_collection('ema/scalar', self.loss)
def Train(self, loss, learning_rate, clip_value_min, clip_value_max, name='training'):
    tf.scalar_summary(':'.join([name, loss.op.name]), loss)
    optimizer = tf.train.AdagradOptimizer(learning_rate)
    grads_and_vars = optimizer.compute_gradients(loss)
    clipped_grads_and_vars = [
        (tf.clip_by_value(g, clip_value_min, clip_value_max), v)
        for g, v in grads_and_vars
    ]
    for g, v in clipped_grads_and_vars:
        _ = tf.histogram_summary(':'.join([name, v.name]), v)
        _ = tf.histogram_summary('%s: gradient for %s' % (name, v.name), g)
    train_op = optimizer.apply_gradients(clipped_grads_and_vars)
    return train_op
def _init_loss(cls, config, q, expected_q, actions, reg_loss=None, summaries=None):
    """Set up the loss function and apply regularization if provided.

    @return: loss_op
    """
    q_masked = tf.reduce_sum(tf.mul(q, actions), reduction_indices=[1])
    loss = tf.reduce_mean(tf.squared_difference(q_masked, expected_q))

    if reg_loss is not None:
        loss += config.reg_param * reg_loss

    if summaries is not None:
        summaries.append(tf.scalar_summary('loss', loss))

    return loss
def grad(self, loc_mean_t, loc_t, h_t, prob, pred, labels):
    loss1, grads1 = self.grad_reinforcement(loc_mean_t, loc_t, h_t, prob, pred, labels)
    loss2, grads2 = self.grad_supervised(prob, labels)

    loss = (1 - self.lambda_) * loss1 + self.lambda_ * loss2
    grads = []
    for i in xrange(len(grads1)):
        grads.append((1 - self.lambda_) * grads1[i] + self.lambda_ * grads2[i])
    tvars = tf.trainable_variables()
    grads = zip(grads, tvars)

    tf.scalar_summary('loss', loss)
    tf.scalar_summary('loss_reinforcement', loss1)
    tf.scalar_summary('loss_supervised', loss2)

    return loss, grads
def _activation_summary(self, x):
    """Helper to create summaries for activations.

    Creates a summary that provides a histogram of activations.
    Creates a summary that measures the sparsity of activations.

    Args:
        x: Tensor
    Returns:
        nothing
    """
    # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
    # session. This helps the clarity of presentation on TensorBoard.
    tensor_name = re.sub('%s_[0-9]*/' % 'tower', '', x.op.name)
    tf.histogram_summary(tensor_name + '/activations', x)
    tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
def train_model(self, train_anchor_batch, train_pos_batch, train_neg_batch, model_params, train_params):
    # Get embeddings for all batches.
    all_batch = tf.concat(0, [train_anchor_batch, train_pos_batch, train_neg_batch])
    with tf.variable_scope("matcher"):
        all_feats, _ = self.build_model(all_batch, model_params)
    anchor_feats, pos_feats, neg_feats = tf.split(0, 3, all_feats)

    # Compute loss.
    triplet_loss = dm_losses.triplet_loss(
        anchor_feats, pos_feats, neg_feats, 0.2,
        loss_type=commons.LossType.TRIPLET_L2)
    tf.scalar_summary("losses/triplet_loss", triplet_loss)

    # Run training.
    base_model.train_model_given_loss(triplet_loss, None, train_params)
    # TODO (jiefeng): use proper evaluation for matcher and test.
def loss_image(prediction, mask):
    """Calculate the loss between a predicted image and the true mask.

    Args:
        prediction: prediction image
        mask: true image
    Returns:
        error: loss value
    """
    print(prediction.get_shape())
    print(mask.get_shape())
    # mask = tf.flatten(mask)
    # prediction = tf.flatten(prediction)
    intersection = tf.reduce_sum(prediction * mask)
    # Negated soft Dice score: minimizing this maximizes overlap with the mask.
    loss = -(2. * intersection + 1.) / (tf.reduce_sum(mask) + tf.reduce_sum(prediction) + 1.)
    tf.scalar_summary('loss', loss)
    return loss
def __init__(self, sess, env_name, model_dir, variables, max_update_per_step, max_to_keep=20):
    self.sess = sess
    self.env_name = env_name
    self.max_update_per_step = max_update_per_step

    self.reset()
    self.max_avg_r = None

    with tf.variable_scope('t'):
        self.t_op = tf.Variable(0, trainable=False, name='t')
        self.t_add_op = self.t_op.assign_add(1)

    self.model_dir = model_dir
    self.saver = tf.train.Saver(variables + [self.t_op], max_to_keep=max_to_keep)
    self.writer = tf.train.SummaryWriter('./logs/%s' % self.model_dir, self.sess.graph)

    with tf.variable_scope('summary'):
        scalar_summary_tags = ['total r', 'avg r', 'avg q', 'avg v', 'avg a', 'avg l']

        self.summary_placeholders = {}
        self.summary_ops = {}

        for tag in scalar_summary_tags:
            self.summary_placeholders[tag] = tf.placeholder('float32', None, name=tag.replace(' ', '_'))
            self.summary_ops[tag] = tf.scalar_summary('%s/%s' % (self.env_name, tag),
                                                      self.summary_placeholders[tag])
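A hedged sketch of how these tag-keyed summary ops are usually fed at the end of an episode; stat_values and step are illustrative assumptions, not attributes of the class above:

# Illustrative method body inside the same class; 'stat_values' maps the
# tags above (e.g. 'avg r') to floats, and 'step' is the current iteration.
for tag, value in stat_values.items():
    summary_str = self.sess.run(self.summary_ops[tag],
                                {self.summary_placeholders[tag]: value})
    self.writer.add_summary(summary_str, step)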
def add_conv_layer(self, scope_name, layer_input, filter_size, input_channels, output_channels,
                   padding='SAME', should_init_wb=True):
    with tf.variable_scope(scope_name):
        weights_shape = filter_size + [input_channels, output_channels]
        initial_weights, initial_bias = self.__get_init_params(scope_name, should_init_wb)
        self.total_weights += weights_shape[0] * weights_shape[1] * weights_shape[2] * weights_shape[3]
        self.logger.info('Weight shape:{} for scope:{}'.format(weights_shape, tf.get_variable_scope().name))

        conv_weights = self.__get_variable('weights', weights_shape, tf.float32,
                                           initializer=initial_weights)
        tf.scalar_summary(scope_name + '/weight_sparsity', tf.nn.zero_fraction(conv_weights))
        tf.histogram_summary(scope_name + '/weights', conv_weights)

        conv = tf.nn.conv2d(layer_input, conv_weights, strides=[1, 1, 1, 1], padding=padding)
        conv_biases = self.__get_variable('biases', [output_channels], tf.float32,
                                          initializer=initial_bias)
        layer_output = tf.nn.relu(tf.nn.bias_add(conv, conv_biases))
        return layer_output
def add_activation_summary(x):
    """Helper to create summaries for activations.

    Creates a summary that provides a histogram of activations.
    Creates a summary that measures the sparsity of activations.

    Args:
        x: Tensor
    Returns:
        nothing
    """
    # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
    # session. This helps the clarity of presentation on TensorBoard.
    tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)
    tf.histogram_summary(tensor_name + '/activations', x)
    tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
def add_loss_summaries(total_loss):
    """Generates moving average for all losses and associated summaries for
    visualizing the performance of the network.

    Args:
        total_loss: Total loss from loss().
    Returns:
        loss_averages_op: op for generating moving averages of losses.
    """
    # Compute the moving average of all individual losses and the total loss.
    loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
    losses = tf.get_collection('losses')
    loss_averages_op = loss_averages.apply(losses + [total_loss])

    # Attach a scalar summary to all individual losses and the total loss; do
    # the same for the averaged version of the losses.
    for l in losses + [total_loss]:
        # Name each loss as '(raw)' and name the moving average version of the
        # loss as the original loss name.
        tf.scalar_summary(l.op.name + ' (raw)', l)
        tf.scalar_summary(l.op.name, loss_averages.average(l))

    return loss_averages_op
def _add_loss_summaries(total_loss):
    """Add summaries for losses in deepSpeech model.

    Generates moving average for all losses and associated summaries for
    visualizing the performance of the network.

    Args:
        total_loss: Total loss from loss().
    Returns:
        loss_averages_op: op for generating moving averages of losses.
    """
    # Compute the moving average of all individual losses and the total loss.
    loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
    losses = tf.get_collection('losses')
    loss_averages_op = loss_averages.apply(losses + [total_loss])

    # Attach a scalar summary to all individual losses and the total loss;
    # do the same for the averaged version of the losses.
    for each_loss in losses + [total_loss]:
        # Name each loss as '(raw)' and name the moving average
        # version of the loss as the original loss name.
        tf.scalar_summary(each_loss.op.name + ' (raw)', each_loss)
        tf.scalar_summary(each_loss.op.name, loss_averages.average(each_loss))

    return loss_averages_op
def add_summaries(summaries, learning_rate, grads):
    """Add summary ops."""
    # Track quantities for TensorBoard display.
    summaries.append(tf.scalar_summary('learning_rate', learning_rate))

    # Add histograms for gradients.
    for grad, var in grads:
        if grad is not None:
            summaries.append(
                tf.histogram_summary(var.op.name + '/gradients', grad))

    # Add histograms for trainable variables.
    for var in tf.trainable_variables():
        summaries.append(tf.histogram_summary(var.op.name, var))

    # Build the summary operation from the last tower summaries.
    summary_op = tf.merge_summary(summaries)
    return summary_op
def loss(logits, labels, n_class, scope='loss'):
    with tf.variable_scope(scope):
        # entropy loss
        targets = one_hot_embedding(labels, n_class)
        entropy_loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(logits, targets),
            name='entropy_loss')
        tf.add_to_collection('losses', entropy_loss)

        # weight l2 decay loss
        weight_l2_losses = [tf.nn.l2_loss(o) for o in tf.get_collection('weights')]
        weight_decay_loss = tf.mul(FLAGS.weight_decay, tf.add_n(weight_l2_losses),
                                   name='weight_decay_loss')
        tf.add_to_collection('losses', weight_decay_loss)

        for var in tf.get_collection('losses'):
            tf.scalar_summary('losses/' + var.op.name, var)

        # total loss
        return tf.add_n(tf.get_collection('losses'), name='total_loss')
def add_loss_summaries(total_loss):
    """Add summaries for losses in CIFAR-10 model.

    Generates moving average for all losses and associated summaries for
    visualizing the performance of the network.

    Args:
        total_loss: Total loss from loss().
    Returns:
        loss_averages_op: op for generating moving averages of losses.
    """
    # Compute the moving average of all individual losses and the total loss.
    loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
    losses = tf.get_collection('losses')
    loss_averages_op = loss_averages.apply(losses + [total_loss])

    # Attach a scalar summary to all individual losses and the total loss; do
    # the same for the averaged version of the losses.
    for l in losses + [total_loss]:
        # Name each loss as '(raw)' and name the moving average version of the
        # loss as the original loss name.
        tf.scalar_summary(l.op.name + ' (raw)', l)
        tf.scalar_summary(l.op.name, loss_averages.average(l))

    return loss_averages_op
def _add_loss_summaries(total_loss):
    """Add summaries for losses in CIFAR-10 model.

    Generates moving average for all losses and associated summaries for
    visualizing the performance of the network.

    Args:
        total_loss: Total loss from loss().
    Returns:
        loss_averages_op: op for generating moving averages of losses.
    """
    # Compute the moving average of all individual losses and the total loss.
    loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
    losses = tf.get_collection('losses')
    loss_averages_op = loss_averages.apply(losses + [total_loss])

    # Attach a scalar summary to all individual losses and the total loss; do
    # the same for the averaged version of the losses.
    for l in losses + [total_loss]:
        # Name each loss as '(raw)' and name the moving average version of the
        # loss as the original loss name.
        tf.scalar_summary(l.op.name + ' (raw)', l)
        tf.scalar_summary(l.op.name, loss_averages.average(l))

    return loss_averages_op