The following three code examples, extracted from open-source Python projects, illustrate how to use tensorflow.contrib.slim.model_variable().
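All three examples assume the usual aliases for the contrib modules:

import tensorflow as tf
import tensorflow.contrib.layers as ly
import tensorflow.contrib.slim as slim

As background, here is a minimal sketch of what slim.model_variable() does: it creates a variable and registers it in the tf.GraphKeys.MODEL_VARIABLES collection (in addition to GLOBAL_VARIABLES), so model weights can later be gathered with slim.get_model_variables(). The name, shape, and initializers below are illustrative only.

# Create a model variable; slim registers it in GraphKeys.MODEL_VARIABLES.
w = slim.model_variable('w',
                        shape=[10, 4],
                        initializer=tf.truncated_normal_initializer(stddev=0.1),
                        regularizer=slim.l2_regularizer(0.05))
model_vars = slim.get_model_variables()  # contains w

Note that FLAGS, fully_connected, and lrelu in the examples are project-local definitions from the original repositories, not TensorFlow APIs.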
Example 1:

def generator(z):
    # TensorFlow cannot yet compute higher-order derivatives of bias_add,
    # so a plain matmul-plus-add fully connected layer (the project-local
    # fully_connected helper) is used instead of the FC layer from
    # tensorflow.contrib.layers; the final conv below adds its bias
    # manually for the same reason.
    weights = slim.model_variable(
        'fn_weights', shape=(FLAGS.z_dim, 4 * 4 * 512),
        initializer=ly.xavier_initializer())
    bias = slim.model_variable(
        'fn_bias', shape=(4 * 4 * 512,),
        initializer=tf.zeros_initializer())
    train = tf.nn.relu(ly.batch_norm(fully_connected(z, weights, bias)))
    train = tf.reshape(train, (-1, 4, 4, 512))
    train = ly.conv2d_transpose(train, 256, 3, stride=2,
                                activation_fn=tf.nn.relu,
                                normalizer_fn=ly.batch_norm, padding='SAME')
    train = ly.conv2d_transpose(train, 128, 3, stride=2,
                                activation_fn=tf.nn.relu,
                                normalizer_fn=ly.batch_norm, padding='SAME')
    train = ly.conv2d_transpose(train, 64, 3, stride=2,
                                activation_fn=tf.nn.relu,
                                normalizer_fn=ly.batch_norm, padding='SAME')
    train = ly.conv2d_transpose(train, 1, 3, stride=1, activation_fn=None,
                                padding='SAME', biases_initializer=None)
    bias = slim.model_variable('bias', shape=(1,),
                               initializer=tf.zeros_initializer())
    train += bias
    train = tf.nn.tanh(train)
    return train
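Note that fully_connected(z, weights, bias) above is not ly.fully_connected (the argument signature differs); it is a project-local helper. Given the bias_add comment, it is presumably just a matrix multiply followed by a plain add, along these lines (a hedged sketch, not the original definition):

def fully_connected(x, weights, bias):
    # A plain add instead of tf.nn.bias_add, so the op stays
    # differentiable to higher order (see the comment in generator()).
    return tf.matmul(x, weights) + bias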
Example 2:

def generator(z):
    # This variant skips batch norm on the FC output and initializes the
    # deconvolution weights DCGAN-style from N(0, 0.02).
    weights = slim.model_variable(
        'fn_weights', shape=(FLAGS.z_dim, 4 * 4 * 512),
        initializer=ly.xavier_initializer())
    bias = slim.model_variable(
        'fn_bias', shape=(4 * 4 * 512,),
        initializer=tf.zeros_initializer())
    train = tf.nn.relu(fully_connected(z, weights, bias))
    train = tf.reshape(train, (-1, 4, 4, 512))
    train = ly.conv2d_transpose(train, 256, 3, stride=2,
                                activation_fn=tf.nn.relu,
                                normalizer_fn=ly.batch_norm, padding='SAME',
                                weights_initializer=tf.random_normal_initializer(0, 0.02))
    train = ly.conv2d_transpose(train, 128, 3, stride=2,
                                activation_fn=tf.nn.relu,
                                normalizer_fn=ly.batch_norm, padding='SAME',
                                weights_initializer=tf.random_normal_initializer(0, 0.02))
    train = ly.conv2d_transpose(train, 64, 3, stride=2,
                                activation_fn=tf.nn.relu,
                                normalizer_fn=ly.batch_norm, padding='SAME',
                                weights_initializer=tf.random_normal_initializer(0, 0.02))
    train = ly.conv2d_transpose(train, 1, 3, stride=1, activation_fn=None,
                                padding='SAME',
                                weights_initializer=tf.random_normal_initializer(0, 0.02),
                                biases_initializer=None)
    bias = slim.model_variable('bias', shape=(1,),
                               initializer=tf.zeros_initializer())
    train += bias
    train = tf.nn.tanh(train)
    return train
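How the generator is wired into a graph is not part of the extract; a hedged usage sketch (the scope name and FLAGS values are assumptions) shows why slim.model_variable matters here: everything the generator creates, including the manual FC weights and biases, ends up in GraphKeys.MODEL_VARIABLES and can be fetched by scope.

z = tf.random_uniform([FLAGS.batch_size, FLAGS.z_dim], -1.0, 1.0)
with tf.variable_scope('generator'):
    fake = generator(z)
# Both the slim.model_variable tensors and the conv2d_transpose weights
# are registered as model variables, so one scoped query collects them all.
g_vars = slim.get_model_variables('generator')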
Example 3:

def discriminator(img, name, target):
    # `target` is unused in this extract; it is kept to match the caller.
    size = 64
    with tf.variable_scope(name):
        # An equivalent first layer with a manually added bias would be:
        # img = ly.conv2d(img, num_outputs=size, kernel_size=3,
        #                 stride=2, activation_fn=None, biases_initializer=None)
        # bias = slim.model_variable('conv_bias', shape=(size,),
        #                            initializer=tf.zeros_initializer())
        # img += bias
        # img = lrelu(img)
        img = ly.conv2d(img, num_outputs=size, kernel_size=3,
                        stride=2, activation_fn=lrelu,
                        normalizer_fn=ly.batch_norm)
        img = ly.conv2d(img, num_outputs=size * 2, kernel_size=3,
                        stride=2, activation_fn=lrelu,
                        normalizer_fn=ly.batch_norm)
        img = ly.conv2d(img, num_outputs=size * 4, kernel_size=3,
                        stride=2, activation_fn=lrelu,
                        normalizer_fn=ly.batch_norm)
        # Fake and real batches are stacked along axis 0, hence the factor
        # of two (the original used a module-level batch_size global).
        img = tf.reshape(img, (2 * FLAGS.batch_size, -1))
        weights = slim.model_variable(
            'weights', shape=[img.get_shape().as_list()[-1], 1],
            initializer=ly.xavier_initializer())
        bias = slim.model_variable('bias', shape=(1,),
                                   initializer=tf.zeros_initializer())
        logit = fully_connected(img, weights, bias)
        fake_logit = logit[:FLAGS.batch_size]
        true_logit = logit[FLAGS.batch_size:]
        # TF >= 1.0 requires keyword arguments for the cross-entropy ops.
        d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
            logits=fake_logit, labels=tf.zeros_like(fake_logit)))
        d_loss_true = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
            logits=true_logit, labels=tf.ones_like(true_logit)))
        f = tf.reduce_mean(d_loss_fake + d_loss_true)
    return f, logit, d_loss_true, d_loss_fake
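lrelu() is another project-local helper not shown in the extract. A standard leaky-ReLU definition, almost certainly what is intended here, looks like this (a sketch, not the original code):

def lrelu(x, leak=0.2, name='lrelu'):
    # Leaky ReLU: identity for x > 0, a small slope `leak` for x < 0.
    with tf.name_scope(name):
        return tf.maximum(x, leak * x)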