The following code examples, extracted from open-source Python projects, illustrate how to use tensorflow.contrib.layers.optimize_loss().
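All of the examples share the same core pattern: hand optimize_loss() a loss tensor, a global step, a learning rate, and an optimizer (a name string such as 'Adam', a tf.train.Optimizer class, or an optimizer instance), and it returns a single training op with optional gradient clipping and summaries wired in. Below is a minimal sketch, assuming TensorFlow 1.x where tf.contrib is still available; the toy regression model around the call is purely illustrative:

import tensorflow as tf
from tensorflow.contrib import layers

x = tf.placeholder(tf.float32, [None, 10])
y = tf.placeholder(tf.float32, [None, 1])

# A one-layer linear model, just to produce a loss over trainable variables.
pred = layers.fully_connected(x, 1, activation_fn=None)
loss = tf.losses.mean_squared_error(y, pred)

# optimize_loss bundles gradient computation, optional clipping, the optimizer
# update, and the global-step increment into one op.
train_op = layers.optimize_loss(
    loss,
    tf.train.get_or_create_global_step(),
    learning_rate=0.01,
    optimizer='Adam',        # or e.g. tf.train.AdamOptimizer / an Optimizer instance
    clip_gradients=5.0)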
Example 1

def autoencoder_model(feature, target, mode, params):
    """Autoencodes sequence model."""
    vocab_size = params.get('vocab_size')
    embed_dim = params.get('embed_dim')

    tf.identity(feature[0], name='feature')
    embed_feature = sequence.embed_features(
        feature, vocab_size=vocab_size, embed_dim=embed_dim)

    output, _ = sequence.sequence_autoencoder_discriminator(
        embed_feature, length=FLAGS.max_doc_length, hidden_size=embed_dim)
    logits, predictions = sequence.outbed_generated(output)

    # Loss and training.
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits, feature)
    loss = tf.reduce_mean(tf.reduce_sum(loss, axis=1))
    train_op = layers.optimize_loss(
        loss, tf.train.get_global_step(),
        learning_rate=params['learning_rate'],
        optimizer=params.get('optimizer', 'Adam'))
    return predictions, loss, train_op
Example 2

def autoencoder_model(feature, target, mode, params):
    """Autoencodes features with given function."""
    autoencoder_fn = params.get('autoencoder_fn')
    feature_processor = params.get('feature_processor', lambda f: f)
    generated_postprocess = params.get('generated_postprocess', lambda f: f)

    # Process features.
    feature = feature_processor(feature)

    # Auto-encode.
    generated, _ = autoencoder_fn(feature)

    # Loss and training.
    loss = tf.contrib.losses.mean_squared_error(feature, generated)
    train_op = layers.optimize_loss(
        loss, tf.train.get_global_step(),
        learning_rate=params['learning_rate'],
        optimizer=params.get('optimizer', 'Adam'))

    # Post process generated.
    prediction = generated_postprocess(generated)
    prediction = tf.identity(prediction, name='generated')
    return prediction, loss, train_op
Example 3

def build_train_graph(loss, learning_rate=0.001, clip_norm=5.0):
    """Builds the training graph."""
    train_args = {"learning_rate": learning_rate, "clip_norm": clip_norm}
    logger.debug("building training graph: %s.", train_args)

    learning_rate = tf.placeholder_with_default(learning_rate, [], "learning_rate")
    global_step = tf.Variable(0, name='global_step', trainable=False)
    train_op = layers.optimize_loss(loss, global_step, learning_rate, "Adam",
                                    clip_gradients=clip_norm)

    model = {"global_step": global_step, "train_op": train_op,
             "learning_rate": learning_rate, "train_args": train_args}
    return model
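A hedged usage sketch for build_train_graph: the caller builds a loss over some trainable variables first, then runs the returned train_op. The placeholders, shapes, and data below are illustrative assumptions, and the module's logger is assumed to be configured:

import numpy as np
import tensorflow as tf
from tensorflow.contrib import layers

inputs = tf.placeholder(tf.float32, [None, 4])
labels = tf.placeholder(tf.float32, [None, 1])
logits = layers.fully_connected(inputs, 1, activation_fn=None)
loss = tf.losses.mean_squared_error(labels, logits)

model = build_train_graph(loss, learning_rate=0.001, clip_norm=5.0)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # The learning rate can be overridden per step through the
    # placeholder_with_default stored in model["learning_rate"].
    _, step = sess.run(
        [model["train_op"], model["global_step"]],
        feed_dict={inputs: np.zeros((8, 4), np.float32),
                   labels: np.zeros((8, 1), np.float32)})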
Example 4

def _get_model_fn(self, model_fn):
    """Backward compatibility way of adding class weight and IS_TRAINING.

    TODO(ipolosukhin): Remove this function after new layers are available.
    Specifically:
     * dropout and batch norm should work via update ops.
     * class weights should be retrieved from weights column or hparams.

    Args:
      model_fn: Core model function.

    Returns:
      Model function.
    """
    def _model_fn(features, targets, mode):
        """Model function."""
        ops.get_default_graph().add_to_collection('IS_TRAINING', mode == 'train')
        if self.class_weight is not None:
            constant_op.constant(self.class_weight, name='class_weight')
        predictions, loss = model_fn(features, targets)
        if isinstance(self.learning_rate, types.FunctionType):
            learning_rate = self.learning_rate(contrib_framework.get_global_step())
        else:
            learning_rate = self.learning_rate
        if isinstance(self.optimizer, types.FunctionType):
            optimizer = self.optimizer(learning_rate)
        else:
            optimizer = self.optimizer
        train_op = layers.optimize_loss(
            loss,
            contrib_framework.get_global_step(),
            learning_rate=learning_rate,
            optimizer=optimizer,
            clip_gradients=self.clip_gradients)
        return predictions, loss, train_op
    return _model_fn
Example 5

def conv_learn(X, y, mode):
    # Ensure our images are 2d
    X = tf.reshape(X, [-1, 36, 36, 1])
    # We'll need these in one-hot format
    y = tf.one_hot(tf.cast(y, tf.int32), 5, 1, 0)

    # conv layer will compute 4 kernels for each 5x5 patch
    with tf.variable_scope('conv_layer'):
        # 5x5 convolution, pad with zeros on edges
        h1 = layers.convolution2d(X, num_outputs=4,
                                  kernel_size=[5, 5],
                                  activation_fn=tf.nn.relu)
        # 2x2 Max pooling, no padding on edges
        p1 = tf.nn.max_pool(h1, ksize=[1, 2, 2, 1],
                            strides=[1, 2, 2, 1], padding='VALID')

        # Need to flatten conv output for use in dense layer
        p1_size = np.product([s.value for s in p1.get_shape()[1:]])
        p1f = tf.reshape(p1, [-1, p1_size])

    # densely connected layer with 5 neurons and dropout
    h_fc1 = layers.fully_connected(p1f, 5, activation_fn=tf.nn.relu)
    drop = layers.dropout(h_fc1, keep_prob=0.5,
                          is_training=mode == tf.contrib.learn.ModeKeys.TRAIN)

    logits = layers.fully_connected(drop, 5, activation_fn=None)
    loss = tf.losses.softmax_cross_entropy(y, logits)

    # Setup the training function manually
    train_op = layers.optimize_loss(
        loss,
        tf.contrib.framework.get_global_step(),
        optimizer='Adam',
        learning_rate=0.01)
    return tf.argmax(logits, 1), loss, train_op

# Use generic estimator with our function
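The closing comment points at the next step: wrapping conv_learn in the generic estimator. A hedged sketch against the old (now removed) tf.contrib.learn API; the dataset arrays and hyperparameters below are illustrative assumptions:

from tensorflow.contrib import learn

# conv_learn returns the (predictions, loss, train_op) tuple the legacy
# Estimator accepts, so it can be passed in directly as model_fn.
classifier = learn.Estimator(model_fn=conv_learn)
classifier.fit(x=train_images,   # assumed float array shaped [N, 36, 36]
               y=train_labels,   # assumed int labels in [0, 5)
               batch_size=10,
               steps=1024)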
Example 6

def rnn_segment(features, targets, mode, params):
    seq_feature = features['seq_feature']
    seq_length = features['seq_length']
    with tf.variable_scope("emb"):
        embeddings = tf.get_variable("char_emb",
                                     shape=[params['num_char'], params['emb_size']])
    seq_emb = tf.nn.embedding_lookup(embeddings, seq_feature)
    batch_size = tf.shape(seq_feature)[0]
    time_step = tf.shape(seq_feature)[1]
    # Each time step carries (k + 1) character ids; flatten their embeddings
    # into a single input vector per step.
    flat_seq_emb = tf.reshape(
        seq_emb,
        shape=[batch_size, time_step, (params['k'] + 1) * params['emb_size']])
    cell = rnn.LSTMCell(params['rnn_units'])
    if mode == ModeKeys.TRAIN:
        # Dropout on the RNN inputs/outputs during training only.
        cell = rnn.DropoutWrapper(cell, params['input_keep_prob'], params['output_keep_prob'])
    projection_cell = rnn.OutputProjectionWrapper(cell, params['num_class'])
    logits, _ = tf.nn.dynamic_rnn(projection_cell, flat_seq_emb,
                                  sequence_length=seq_length, dtype=tf.float32)
    # Mask out padded positions in both the loss and the metrics.
    weight_mask = tf.to_float(tf.sequence_mask(seq_length))
    loss = seq2seq.sequence_loss(logits, targets, weights=weight_mask)
    train_op = layers.optimize_loss(
        loss=loss,
        global_step=tf.contrib.framework.get_global_step(),
        learning_rate=params["learning_rate"],
        optimizer=tf.train.AdamOptimizer,
        clip_gradients=params['grad_clip'],
        summaries=[
            "learning_rate",
            "loss",
            "gradients",
            "gradient_norm",
        ])
    pred_classes = tf.to_int32(tf.argmax(input=logits, axis=2))
    # Classes 0 and 3 are treated as word-boundary labels when computing
    # word-level precision and recall.
    pred_words = tf.logical_or(tf.equal(pred_classes, 0), tf.equal(pred_classes, 3))
    target_words = tf.logical_or(tf.equal(targets, 0), tf.equal(targets, 3))
    precision = metrics.streaming_precision(pred_words, target_words, weights=weight_mask)
    recall = metrics.streaming_recall(pred_words, target_words, weights=weight_mask)
    predictions = {"classes": pred_classes}
    eval_metric_ops = {"precision": precision, "recall": recall}
    return learn.ModelFnOps(mode, predictions, loss, train_op,
                            eval_metric_ops=eval_metric_ops)
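A hedged sketch of how a contrib.learn model_fn like rnn_segment would be consumed: since it reads its hyperparameters from params and returns ModelFnOps, it plugs into the legacy Estimator directly. Every value in the dict below, and the model_dir path, is an illustrative assumption:

from tensorflow.contrib import learn

params = {
    'num_char': 5000,        # vocabulary size (assumed)
    'emb_size': 64,          # embedding width (assumed)
    'k': 2,                  # context characters per position (assumed)
    'rnn_units': 128,
    'num_class': 4,          # e.g. a 4-tag segmentation scheme (assumed)
    'input_keep_prob': 0.8,
    'output_keep_prob': 0.8,
    'learning_rate': 0.001,
    'grad_clip': 5.0,
}

estimator = learn.Estimator(model_fn=rnn_segment,
                            params=params,
                            model_dir='/tmp/rnn_segment')   # assumed path

Training would then be driven through estimator.fit with an input_fn that yields the {'seq_feature', 'seq_length'} feature dict and the target tensor.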
Example 7

def build(self):
    self.output = self._generator(self.input, name='gene')
    self.content_loss = tf.reduce_mean(tf.multiply(tf.log1p(self.output),
                                                   tf.abs(tf.subtract(self.target, self.output))))
    assert ten_sh(self.output) == ten_sh(self.target)

    self.concat_output = tf.concat(1, (self.input, self.output))
    self.concat_target = tf.concat(1, (self.input, self.target))

    # Critic scores for generated and real pairs (weights shared via reuse).
    self.fake_em = self._critic(self.concat_output, name='critic')
    self.true_em = self._critic(self.concat_target, name='critic', reuse=True)
    self.c_loss = tf.reduce_mean(self.fake_em - self.true_em, name='c_loss')
    self.g_loss = tf.reduce_mean(-self.fake_em, name='g_loss')

    ####summary####
    content_loss_sum = tf.summary.scalar('content_loss', self.content_loss)
    c_loss_sum = tf.summary.scalar('c_loss', self.c_loss)
    g_loss_sum = tf.summary.scalar('g_loss', self.g_loss)
    img_sum = tf.summary.image('gene_img', self.concat_output, max_outputs=1)
    img_sum = tf.summary.image('tar_img', self.concat_target, max_outputs=1)
    self.summary = tf.summary.merge_all()
    ##############

    theta_g = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='gene')
    theta_c = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='critic')
    counter_g = tf.Variable(trainable=False, initial_value=0, dtype=tf.int32)
    counter_c = tf.Variable(trainable=False, initial_value=0, dtype=tf.int32)

    # Separate training ops for the critic and the generator, each restricted
    # to its own variable set and step counter.
    self.c_opt = ly.optimize_loss(loss=self.c_loss, learning_rate=self.c_lr,
                                  optimizer=tf.train.RMSPropOptimizer,
                                  variables=theta_c,
                                  global_step=counter_c)
    self.g_opt = ly.optimize_loss(loss=self.g_loss, learning_rate=self.g_lr,
                                  optimizer=tf.train.RMSPropOptimizer,
                                  variables=theta_g,
                                  global_step=counter_g)
    self.content_opt = ly.optimize_loss(loss=self.content_loss, learning_rate=self.g_lr,
                                        optimizer=tf.train.RMSPropOptimizer,
                                        variables=theta_g,
                                        global_step=counter_g)

    # WGAN-style weight clipping: clamp the critic variables after each critic update.
    clipped_c_var = [tf.assign(var, tf.clip_by_value(var, self.clamp_lower, self.clamp_upper))
                     for var in theta_c]
    with tf.control_dependencies([self.c_opt]):
        self.c_opt = tf.tuple(clipped_c_var)
Example 8

def build(self):
    self.output = self._generator(self.input, name='gene')
    self.content_loss = tf.reduce_mean(tf.multiply(tf.log1p(self.output),
                                                   tf.abs(tf.subtract(self.target, self.output))))
    assert ten_sh(self.output) == ten_sh(self.target)

    # Evaluation output: rescale and exponentiate the input/output pair.
    self.eva_op = tf.concat(1,
                            (tf.exp(self.input * 12.0) - 1, tf.exp(self.output * 8.0) - 1),
                            name='eva_op')
    self.concat_output = tf.exp(tf.concat(1, (self.input, self.output)))
    self.concat_target = tf.exp(tf.concat(1, (self.input, self.target)))

    # Critic scores for generated and real pairs (weights shared via reuse).
    self.fake_em = self._critic(self.concat_output, name='critic')
    self.true_em = self._critic(self.concat_target, name='critic', reuse=True)
    self.c_loss = tf.reduce_mean(self.fake_em - self.true_em, name='c_loss')
    self.g_loss = tf.reduce_mean(-self.fake_em, name='g_loss')

    ####summary####
    content_loss_sum = tf.summary.scalar('content_loss', self.content_loss)
    c_loss_sum = tf.summary.scalar('c_loss', self.c_loss)
    g_loss_sum = tf.summary.scalar('g_loss', self.g_loss)
    img_sum = tf.summary.image('gene_img', self.concat_output, max_outputs=1)
    img_sum = tf.summary.image('tar_img', self.concat_target, max_outputs=1)
    self.summary = tf.summary.merge_all()
    ##############

    theta_g = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='gene')
    theta_c = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='critic')
    counter_g = tf.Variable(trainable=False, initial_value=0, dtype=tf.int32)
    counter_c = tf.Variable(trainable=False, initial_value=0, dtype=tf.int32)

    # Separate training ops for the critic and the generator, each restricted
    # to its own variable set and step counter.
    self.c_opt = ly.optimize_loss(loss=self.c_loss, learning_rate=self.c_lr,
                                  optimizer=tf.train.RMSPropOptimizer,
                                  variables=theta_c,
                                  global_step=counter_c)
    self.g_opt = ly.optimize_loss(self.g_loss, learning_rate=self.g_lr,
                                  optimizer=tf.train.RMSPropOptimizer,
                                  variables=theta_g,
                                  global_step=counter_g)
    self.content_opt = ly.optimize_loss(self.content_loss, learning_rate=self.g_lr,
                                        optimizer=tf.train.RMSPropOptimizer,
                                        variables=theta_g,
                                        global_step=counter_g)

    # WGAN-style weight clipping: clamp the critic variables after each critic update.
    clipped_c_var = [tf.assign(var, tf.clip_by_value(var, self.clamp_lower, self.clamp_upper))
                     for var in theta_c]
    with tf.control_dependencies([self.c_opt]):
        self.c_opt = tf.tuple(clipped_c_var)
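Because the clipping op is folded into self.c_opt via control_dependencies, a training loop only has to alternate the three ops. A hedged sketch of the WGAN-style schedule this graph implies; `model`, `make_feed`, the step count, and the 5:1 critic/generator ratio are all illustrative assumptions:

import tensorflow as tf

# `model` is assumed to be an instance of the class above with build() called,
# and `make_feed` a hypothetical helper returning a feed_dict for
# model.input / model.target.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(10000):
        for _ in range(5):   # several critic updates per generator update (assumed ratio)
            # c_opt also clamps the critic weights, via the control dependency above.
            sess.run(model.c_opt, feed_dict=make_feed())
        sess.run([model.g_opt, model.content_opt], feed_dict=make_feed())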