The following 27 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.contrib.layers.linear().
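Before the project examples, here is a minimal, self-contained sketch of the basic call (assuming TensorFlow 1.x with tf.contrib available; the placeholder shape and output size are illustrative only, not taken from the projects below):

import tensorflow as tf
from tensorflow.contrib import layers

# Toy input: a batch of 64-dimensional feature vectors (shape is illustrative).
features = tf.placeholder(tf.float32, [None, 64])
# linear() is fully_connected() with activation_fn=None, i.e. it computes x*W + b.
logits = layers.linear(features, num_outputs=10, scope="output")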
def linear_autoencoder_discriminator(
        x, output_dim, hidden_sizes, encoding_dim,
        scope='Discriminator', reuse=False, pretrained=None):
    with tf.variable_scope(scope, reuse=reuse):
        # Encoder.
        for hsz in hidden_sizes:
            x = tf.nn.elu(layers.linear(x, hsz))
        encoding = x = layers.linear(x, encoding_dim)
        # Decoder.
        for hsz in reversed(hidden_sizes):
            x = tf.nn.elu(layers.linear(x, hsz))
        decoding = layers.linear(x, output_dim * output_dim)
    if pretrained is not None:
        tf.contrib.framework.init_from_checkpoint(
            pretrained, {'Discriminator/': 'Discriminator/'})
    return decoding, None
def build_loss(self, beta, task, weight_decay):
    batch_size = task['batch_size']
    with tf.variable_scope("network") as scope:
        network = self.build_network(self.x)
        logits = linear(network, num_outputs=10)
    with tf.name_scope('loss'):
        kl_terms = [batch_average(kl) for kl in tf.get_collection('kl_terms')]
        if not kl_terms:
            kl_terms = [tf.constant(0.)]
        N_train = self.dataset['train'][0].shape[0]
        Lz = tf.add_n(kl_terms) / N_train
        Lx = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=self.y))
        beta = tf.constant(beta)
        L2 = tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables()])
        loss = Lx + beta * Lz + weight_decay * L2
        correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(self.y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    self.loss = loss
    self.error = (1. - accuracy) * 100.
    self.Lx = Lx
    self.Lz = Lz
    self.beta = beta
def create_architecture(self):
    self.vars.sequence_length = tf.placeholder(tf.int64, [1], name="sequence_length")

    fc_input = self.get_input_layers()
    fc1 = layers.fully_connected(fc_input, self.fc_units_num,
                                 scope=self._name_scope + "/fc1")
    fc1_reshaped = tf.reshape(fc1, [1, -1, self.fc_units_num])

    self.recurrent_cells = self._get_ru_class()(self._recurrent_units_num)
    state_c = tf.placeholder(tf.float32, [1, self.recurrent_cells.state_size.c],
                             name="initial_lstm_state_c")
    state_h = tf.placeholder(tf.float32, [1, self.recurrent_cells.state_size.h],
                             name="initial_lstm_state_h")
    self.vars.initial_network_state = LSTMStateTuple(state_c, state_h)
    rnn_outputs, self.ops.network_state = tf.nn.dynamic_rnn(
        self.recurrent_cells, fc1_reshaped,
        initial_state=self.vars.initial_network_state,
        sequence_length=self.vars.sequence_length,
        scope=self._name_scope)
    reshaped_rnn_outputs = tf.reshape(rnn_outputs, [-1, self._recurrent_units_num])

    q = layers.linear(reshaped_rnn_outputs, num_outputs=self.actions_num,
                      scope=self._name_scope + "/q")
    self.reset_state()
    return q
def create_architecture(self, img_input, misc_input, name_scope, reuse=False, **specs):
    with arg_scope([layers.conv2d, layers.fully_connected], reuse=reuse), \
            arg_scope([], reuse=reuse):
        fc_input = self.get_input_layers(img_input, misc_input, name_scope)
        fc1 = layers.fully_connected(fc_input, num_outputs=self.fc_units_num,
                                     scope=name_scope + "/fc1")

        fc2_value = layers.fully_connected(fc1, num_outputs=256,
                                           scope=name_scope + "/fc2_value")
        value = layers.linear(fc2_value, num_outputs=1,
                              scope=name_scope + "/fc3_value")

        fc2_advantage = layers.fully_connected(fc1, num_outputs=256,
                                               scope=name_scope + "/fc2_advantage")
        advantage = layers.linear(fc2_advantage, num_outputs=self.actions_num,
                                  scope=name_scope + "/fc3_advantage")

        mean_advantage = tf.reshape(tf.reduce_mean(advantage, axis=1), (-1, 1))
        q_op = advantage + (mean_advantage - value)

    return q_op
def sequence_discriminator(x, length, hidden_size, scope='Discriminator', reuse=False):
    with tf.variable_scope(scope, reuse=reuse):
        x = tf.unstack(x, length, 1)
        cell = tf.nn.rnn_cell.GRUCell(hidden_size)
        # tf.nn.rnn expects (cell, inputs) and needs a dtype or initial_state.
        _, encoding = tf.nn.rnn(cell, x, dtype=tf.float32)
        return layers.linear(encoding, 1), None
def linear_generator(x, hidden_size):
    with tf.variable_scope('Generator'):
        h0 = tf.nn.softplus(layers.linear(x, hidden_size))
        return layers.linear(h0, 1)
def linear_discriminator(x, hidden_size, scope='Discriminator', reuse=False):
    with tf.variable_scope(scope, reuse=reuse):
        h0 = tf.tanh(layers.linear(x, hidden_size * 2))
        h1 = tf.tanh(layers.linear(h0, hidden_size * 2))
        h2 = tf.tanh(layers.linear(h1, hidden_size * 2))
        return tf.sigmoid(layers.linear(h2, 1)), None
def autoencoder_discriminator(x, hidden_size, scope='Discriminator', reuse=False):
    with tf.variable_scope(scope, reuse=reuse):
        e0 = tf.nn.elu(layers.linear(x, hidden_size))
        e1 = layers.linear(e0, 1)
        d1 = tf.nn.elu(layers.linear(e1, hidden_size))
        d0 = layers.linear(d1, 1)
        return d0, e1
def linear_generator(x, output_dim, scope='Generator'):
    with tf.variable_scope(scope):
        return layers.linear(x, output_dim * output_dim)
def linear_discriminator(x, hidden_size, scope='Discriminator', reuse=False):
    with tf.variable_scope(scope, reuse=reuse):
        h0 = tf.tanh(layers.linear(x, hidden_size * 2))
        h1 = tf.tanh(layers.linear(h0, hidden_size * 2))
        h2 = tf.tanh(layers.linear(h1, hidden_size * 2))
        return tf.sigmoid(layers.linear(h2, 1))
def __init__(self, clf_pic_name, input_dims, hidden_size, num_decoder_symbols,
             learning_rate=0.01, maxlen_to_decode=16):
    self.clf_pic_name = clf_pic_name  # we will save the model here

    # set up some common variables
    self.start_of_sequence_id = 0  # this will help us to terminate the seq
    self.end_of_sequence_id = 0
    self.encoder_hidden_size = hidden_size
    self.decoder_hidden_size = self.encoder_hidden_size
    self.learning_rate = learning_rate
    self.decoder_sequence_length = maxlen_to_decode  # max length that decoder will predict before terminating

    # placeholders and variables
    self.encoder_length = tf.placeholder(tf.int32, [None])  # seq length for dynamic time unrolling
    self.decoder_length = tf.placeholder(tf.int32, [None])  # seq length for dynamic time unrolling
    self.encoder_embedding_size = input_dims
    self.decoder_embedding_size = self.encoder_embedding_size
    self.decoder_embeddings = tf.get_variable(
        'decoder_embeddings',
        [self.decoder_embedding_size, self.decoder_embedding_size])
    self.num_decoder_symbols = num_decoder_symbols  # number of output classes of decoder

    with tf.variable_scope("rnn") as scope:
        # setting up weights for computing the final output
        self.output_fn = lambda x: layers.linear(x, self.num_decoder_symbols, scope=scope)

    self.inputs = tf.placeholder("float", [None, None, self.encoder_embedding_size])
    self.decoder_inputs = tf.placeholder("float", [None, None, self.decoder_embedding_size])
    self.encoder_targets = tf.placeholder("float", [None, None, self.num_decoder_symbols])
    self.decoder_targets = tf.placeholder("float", [None, None, self.num_decoder_symbols])

    # build model - compute graph
    self.encoder()
    self.decoder_train()
    self.decoder_inference()
    self.compute_cost()
    self.optimize()
    self.get_sm_outputs()
    return
def encoder(self):
    self.encoder_outputs, self.encoder_state = tf.nn.dynamic_rnn(
        cell=tf.contrib.rnn.GRUCell(self.encoder_hidden_size),
        inputs=self.inputs,
        sequence_length=self.encoder_length,
        dtype=tf.float32,
        scope="rnn",  # tf.variable_scope("rnn")
        time_major=False)
    self.encoder_outputs = tf.contrib.layers.linear(self.encoder_state, self.num_decoder_symbols)
    self.encoder_softmax_outputs = tf.nn.softmax(self.encoder_outputs)
    return  # encoder_outputs, encoder_state
def q_network(observations):
    return tf_layers.linear(observations, env.actions, biases_initializer=None)
def policy_network(observations):
    hidden = tf_layers.fully_connected(observations, args.hidden_layer, activation_fn=tf.nn.relu)
    logits = tf_layers.linear(hidden, env.actions)
    return logits
def policy_and_value_network(observations):
    # TODO: Baseline network, used in (Mnih et al., 2016)
    conv = tf_layers.convolution2d(observations, 16, 8, 4)
    conv = tf_layers.convolution2d(conv, 32, 4, 2)
    conv = tf_layers.flatten(conv)
    hidden_layer = tf_layers.fully_connected(conv, 128, activation_fn=tf.nn.relu)
    logits = tf_layers.linear(hidden_layer, env.actions)
    value = tf_layers.linear(hidden_layer, 1)
    # TODO: If you do not want to use baseline, uncomment the next line
    # value = tf.zeros([tf.shape(observations)[0], 1])
    return logits, value
def policy_and_value_network(observations):
    # TODO: Example network, you may choose another
    hidden_layer = tf_layers.fully_connected(observations, 200, activation_fn=tf.nn.relu)
    hidden_layer = tf_layers.fully_connected(hidden_layer, 100, activation_fn=tf.nn.relu)
    logits = tf_layers.linear(hidden_layer, env.actions)
    value = tf_layers.linear(hidden_layer, 1)
    return logits, value
def output_fn(cell_output):
    if cell_output is None:
        return tf.zeros([LETTERS], tf.float32)  # only used for shape inference
    else:
        return tf_layers.linear(cell_output, num_outputs=LETTERS, scope="rnn_output")

# Input function (makes rnn input from word id and cell state)
def policy_value_layer(self, inputs):
    pi = fully_connected(inputs, num_outputs=self.actions_num,
                         scope=self._name_scope + "/fc_pi",
                         activation_fn=tf.nn.softmax)
    state_value = linear(inputs, num_outputs=1,
                         scope=self._name_scope + "/fc_value")
    v = tf.reshape(state_value, [-1])
    return pi, v
def create_architecture(self):
    fc_input = self.get_input_layers()
    fc1 = layers.fully_connected(fc_input, num_outputs=self.fc_units_num,
                                 scope=self._name_scope + "/fc1")
    q = layers.linear(fc1, num_outputs=self.actions_num,
                      scope=self._name_scope + "/q")
    return q
def inference_net(x, latent_size):
    return layers.linear(x, latent_size)
def generative_net(z, data_size):
    return layers.linear(z, data_size)
def _regression_head(label_name=None,
                     weight_column_name=None,
                     label_dimension=1,
                     enable_centered_bias=False,
                     head_name=None):
    """Creates a _Head for linear regression.

    Args:
      label_name: String, name of the key in label dict. Can be null if label
        is a tensor (single headed models).
      weight_column_name: A string defining feature column name representing
        weights. It is used to down weight or boost examples during training.
        It will be multiplied by the loss of the example.
      label_dimension: Number of regression labels per example. This is the size
        of the last dimension of the labels `Tensor` (typically, this has shape
        `[batch_size, label_dimension]`).
      enable_centered_bias: A bool. If True, estimator will learn a centered
        bias variable for each class. Rest of the model structure learns the
        residual after centered bias.
      head_name: name of the head. If provided, predictions, summary and metrics
        keys will be suffixed by `"/" + head_name` and the default variable
        scope will be `head_name`.

    Returns:
      An instance of _Head
    """
    return _RegressionHead(
        label_name=label_name,
        weight_column_name=weight_column_name,
        label_dimension=label_dimension,
        enable_centered_bias=enable_centered_bias,
        head_name=head_name)

# TODO(zakaria): Add logistic_regression_head
def _logits(logits_input, logits, logits_dimension):
    """Validate logits args, and create `logits` if necessary.

    Exactly one of `logits_input` and `logits` must be provided.

    Args:
      logits_input: `Tensor` input to `logits`.
      logits: `Tensor` output.
      logits_dimension: Integer, last dimension of `logits`. This is used to
        create `logits` from `logits_input` if `logits` is `None`; otherwise,
        it's used to validate `logits`.

    Returns:
      `logits` `Tensor`.

    Raises:
      ValueError: if neither or both of `logits` and `logits_input` are supplied.
    """
    if (logits_dimension is None) or (logits_dimension < 1):
        raise ValueError("Invalid logits_dimension %s." % logits_dimension)

    # If not provided, create logits.
    if logits is None:
        if logits_input is None:
            raise ValueError("Neither logits nor logits_input supplied.")
        return layers_lib.linear(logits_input, logits_dimension, scope="logits")

    if logits_input is not None:
        raise ValueError("Both logits and logits_input supplied.")

    logits = ops.convert_to_tensor(logits, name="logits")
    logits_dims = logits.get_shape().dims
    if logits_dims is not None:
        logits_dims[-1].assert_is_compatible_with(logits_dimension)

    return logits
def _logistic_regression_model_fn(features, labels, mode):
    _ = mode
    logits = layers.linear(
        features,
        1,
        weights_initializer=init_ops.zeros_initializer(),
        # Intentionally uses really awful initial values so that
        # AUC/precision/recall/etc will change meaningfully even on a toy dataset.
        biases_initializer=init_ops.constant_initializer(-10.0))
    predictions = math_ops.sigmoid(logits)
    loss = loss_ops.sigmoid_cross_entropy(logits, labels)
    train_op = optimizers.optimize_loss(
        loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
    return predictions, loss, train_op
def create_architecture(self):
    self.vars.sequence_length = tf.placeholder(tf.int64, [1], name="sequence_length")

    fc_input = self.get_input_layers()
    fc1 = fully_connected(fc_input, num_outputs=self.fc_units_num,
                          scope=self._name_scope + "/fc1")
    fc1_reshaped = tf.reshape(fc1, [1, -1, self.fc_units_num])

    self.recurrent_cells = self.ru_class(self._recurrent_units_num)
    state_c = tf.placeholder(tf.float32, [1, self.recurrent_cells.state_size.c],
                             name="initial_lstm_state_c")
    state_h = tf.placeholder(tf.float32, [1, self.recurrent_cells.state_size.h],
                             name="initial_lstm_state_h")
    self.vars.initial_network_state = LSTMStateTuple(state_c, state_h)
    rnn_outputs, self.ops.network_state = tf.nn.dynamic_rnn(
        self.recurrent_cells, fc1_reshaped,
        initial_state=self.vars.initial_network_state,
        sequence_length=self.vars.sequence_length,
        time_major=False,
        scope=self._name_scope)
    reshaped_rnn_outputs = tf.reshape(rnn_outputs, [-1, self._recurrent_units_num])
    self.reset_state()

    self.ops.pi = fully_connected(reshaped_rnn_outputs, num_outputs=self.actions_num,
                                  scope=self._name_scope + "/fc_pi",
                                  activation_fn=tf.nn.softmax)
    state_value = linear(reshaped_rnn_outputs, num_outputs=1,
                         scope=self._name_scope + "/fc_value")
    self.ops.v = tf.reshape(state_value, [-1])

    if self.multi_frameskip:
        frameskip_output_len = self.actions_num
    else:
        frameskip_output_len = 1

    if self.fs_stop_gradient:
        reshaped_rnn_outputs = tf.stop_gradient(reshaped_rnn_outputs)

    self.ops.frameskip_mu = 1 + fully_connected(
        reshaped_rnn_outputs, num_outputs=frameskip_output_len,
        scope=self._name_scope + "/fc_frameskip_mu",
        activation_fn=tf.nn.relu,
        biases_initializer=tf.constant_initializer(self.fs_mu_bias))
    self.ops.frameskip_variance = fully_connected(
        reshaped_rnn_outputs, num_outputs=frameskip_output_len,
        scope=self._name_scope + "/fc_frameskip_variance",
        activation_fn=tf.nn.relu,
        biases_initializer=tf.constant_initializer(self.fs_sigma_bias))

    if not self.multi_frameskip:
        self.ops.frameskip_mu = tf.reshape(self.ops.frameskip_mu, (-1,))
        self.ops.frameskip_variance = tf.reshape(self.ops.frameskip_variance, (-1,))

    self.ops.frameskip_sigma = tf.sqrt(self.ops.frameskip_variance, name="frameskip_sigma")
    self.ops.frameskip_policy = [self.ops.frameskip_mu, self.ops.frameskip_sigma]
def create_architecture(self):
    self.vars.sequence_length = tf.placeholder(tf.int64, [1], name="sequence_length")

    fc_input = self.get_input_layers()
    fc1 = fully_connected(fc_input, num_outputs=self.fc_units_num,
                          scope=self._name_scope + "/fc1")
    fc1_reshaped = tf.reshape(fc1, [1, -1, self.fc_units_num])

    self.recurrent_cells = self.ru_class(self._recurrent_units_num)
    state_c = tf.placeholder(tf.float32, [1, self.recurrent_cells.state_size.c],
                             name="initial_lstm_state_c")
    state_h = tf.placeholder(tf.float32, [1, self.recurrent_cells.state_size.h],
                             name="initial_lstm_state_h")
    self.vars.initial_network_state = LSTMStateTuple(state_c, state_h)
    rnn_outputs, self.ops.network_state = tf.nn.dynamic_rnn(
        self.recurrent_cells, fc1_reshaped,
        initial_state=self.vars.initial_network_state,
        sequence_length=self.vars.sequence_length,
        time_major=False,
        scope=self._name_scope)
    reshaped_rnn_outputs = tf.reshape(rnn_outputs, [-1, self._recurrent_units_num])
    self.reset_state()

    self.ops.pi_logits = fully_connected(reshaped_rnn_outputs, num_outputs=self.actions_num,
                                         scope=self._name_scope + "/fc_pi",
                                         activation_fn=None)
    self.ops.pi = tf.nn.softmax(self.ops.pi_logits)
    state_value = linear(reshaped_rnn_outputs, num_outputs=1,
                         scope=self._name_scope + "/fc_value")
    self.ops.v = tf.reshape(state_value, [-1])

    if self.multi_frameskip:
        frameskip_output_len = self.actions_num
    else:
        frameskip_output_len = 1

    if self.fs_stop_gradient:
        reshaped_rnn_outputs = tf.stop_gradient(reshaped_rnn_outputs)

    self.ops.frameskip_n = 1 + fully_connected(
        reshaped_rnn_outputs, num_outputs=frameskip_output_len,
        scope=self._name_scope + "/fc_frameskip_n",
        activation_fn=tf.nn.relu,
        biases_initializer=tf.constant_initializer(self.fs_n_bias))
    frameskip_p = fully_connected(
        reshaped_rnn_outputs, num_outputs=frameskip_output_len,
        scope=self._name_scope + "/fc_frameskip_p",
        activation_fn=tf.nn.sigmoid,
        biases_initializer=tf.constant_initializer(self.fs_p_bias))
    eps = 1e-20
    self.ops.frameskip_p = tf.clip_by_value(frameskip_p, eps, 1 - eps)

    if not self.multi_frameskip:
        self.ops.frameskip_n = tf.reshape(self.ops.frameskip_n, (-1,))
        self.ops.frameskip_p = tf.reshape(self.ops.frameskip_p, (-1,))

    self.ops.frameskip_policy = [self.ops.frameskip_n, self.ops.frameskip_p]