我们从Python开源项目中,提取了以下50个代码示例,用于说明如何使用tensorflow.contrib.layers.convolution2d()。
def inference(input_img):
    """Inference network: encode an image into a tanh-squashed latent vector.

    Three stride-2 5x5 convolutions (128/256/512 filters), each followed by
    batch norm and ReLU, then a fully connected projection to the latent size.

    NOTE(review): `latent_size` is a module-level global defined elsewhere in
    the file — confirm it is set before this function is called.
    """
    # input_latent = Input(batch_shape=noise_dim, dtype=im_dtype)
    with tf.variable_scope('Net_Inf') as scope:
        xx = layers.convolution2d(input_img, 128, kernel_size=(5,5), stride=(2, 2), padding='SAME', activation_fn=None)
        xx = layers.batch_norm(xx)
        xx = tf.nn.relu(xx)
        xx = layers.convolution2d(xx, 256, kernel_size=(5,5), stride=(2, 2), padding='SAME', activation_fn=None)
        xx = layers.batch_norm(xx)
        xx = tf.nn.relu(xx)
        xx = layers.convolution2d(xx, 512, kernel_size=(5,5), stride=(2, 2), padding='SAME', activation_fn=None)
        xx = layers.batch_norm(xx)
        xx = tf.nn.relu(xx)
        xx = layers.flatten(xx)
        xx = layers.fully_connected(xx, num_outputs=latent_size, activation_fn=None)
        xx = layers.batch_norm(xx)
        inf_latent = tf.nn.tanh(xx)
    return inf_latent
# specify discriminative model
def BN_ReLU(self, net): """Batch Normalization and ReLU.""" # 'gamma' is not used as the next layer is ReLU net = batch_norm(net, center=True, scale=False, activation_fn=tf.nn.relu, ) self._activation_summary(net) return net # def conv2d(self, net, num_ker, ker_size, stride): # 1D-convolution net = convolution2d( net, num_outputs=num_ker, kernel_size=[ker_size, 1], stride=[stride, 1], padding='SAME', activation_fn=None, normalizer_fn=None, weights_initializer=variance_scaling_initializer(), weights_regularizer=l2_regularizer(self.weight_decay), biases_initializer=tf.zeros_initializer) return net
def model(img_in, num_actions, scope, reuse=False):
    """As described in https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf"""
    with tf.variable_scope(scope, reuse=reuse):
        net = img_in
        with tf.variable_scope("convnet"):
            # Nature-DQN convolutional torso: (filters, kernel, stride) triples.
            for filters, kernel, stride in ((32, 8, 4), (64, 4, 2), (64, 3, 1)):
                net = layers.convolution2d(net, num_outputs=filters, kernel_size=kernel,
                                           stride=stride, activation_fn=tf.nn.relu)
            net = layers.flatten(net)
        with tf.variable_scope("action_value"):
            # Two fully connected layers produce one Q-value per action.
            net = layers.fully_connected(net, num_outputs=512, activation_fn=tf.nn.relu)
            net = layers.fully_connected(net, num_outputs=num_actions, activation_fn=None)
        return net
def dueling_model(img_in, num_actions, scope, reuse=False):
    """As described in https://arxiv.org/abs/1511.06581"""
    with tf.variable_scope(scope, reuse=reuse):
        net = img_in
        with tf.variable_scope("convnet"):
            # Nature-DQN convolutional torso shared by both streams.
            for filters, kernel, stride in ((32, 8, 4), (64, 4, 2), (64, 3, 1)):
                net = layers.convolution2d(net, num_outputs=filters, kernel_size=kernel,
                                           stride=stride, activation_fn=tf.nn.relu)
            net = layers.flatten(net)
        with tf.variable_scope("state_value"):
            # Scalar state-value stream V(s).
            value_hidden = layers.fully_connected(net, num_outputs=512, activation_fn=tf.nn.relu)
            value = layers.fully_connected(value_hidden, num_outputs=1, activation_fn=None)
        with tf.variable_scope("action_value"):
            # Advantage stream A(s, a), centered by its mean over actions.
            adv_hidden = layers.fully_connected(net, num_outputs=512, activation_fn=tf.nn.relu)
            advantages = layers.fully_connected(adv_hidden, num_outputs=num_actions, activation_fn=None)
            advantages = advantages - tf.expand_dims(tf.reduce_mean(advantages, 1), 1)
        return value + advantages
def model(img_in, num_actions, scope, reuse=False, concat_softmax=False):
    """As described in https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf"""
    with tf.variable_scope(scope, reuse=reuse):
        net = img_in
        with tf.variable_scope("convnet"):
            # Nature-DQN convolutional torso.
            for filters, kernel, stride in ((32, 8, 4), (64, 4, 2), (64, 3, 1)):
                net = layers.convolution2d(net, num_outputs=filters, kernel_size=kernel,
                                           stride=stride, activation_fn=tf.nn.relu)
            net = layers.flatten(net)
        with tf.variable_scope("action_value"):
            net = layers.fully_connected(net, num_outputs=512, activation_fn=tf.nn.relu)
            net = layers.fully_connected(net, num_outputs=num_actions, activation_fn=None)
        # Optionally squash Q-values into a probability distribution.
        if concat_softmax:
            net = tf.nn.softmax(net)
        return net
def model(img_in, num_actions, scope, reuse=False, layer_norm=False):
    """As described in https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf"""
    with tf.variable_scope(scope, reuse=reuse):
        net = img_in
        with tf.variable_scope("convnet"):
            # Nature-DQN convolutional torso.
            for filters, kernel, stride in ((32, 8, 4), (64, 4, 2), (64, 3, 1)):
                net = layers.convolution2d(net, num_outputs=filters, kernel_size=kernel,
                                           stride=stride, activation_fn=tf.nn.relu)
            conv_out = layers.flatten(net)
        with tf.variable_scope("action_value"):
            hidden = layers.fully_connected(conv_out, num_outputs=512, activation_fn=None)
            # Optional layer normalization before the ReLU nonlinearity.
            if layer_norm:
                hidden = layer_norm_fn(hidden, relu=True)
            else:
                hidden = tf.nn.relu(hidden)
            q_values = layers.fully_connected(hidden, num_outputs=num_actions, activation_fn=None)
        return q_values
def Actor(img_in, num_actions, scope, reuse=False):
    """As described in https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf"""
    with tf.variable_scope(scope, reuse=reuse):
        net = img_in
        with tf.variable_scope("convnet"):
            # Nature-DQN convolutional torso.
            for filters, kernel, stride in ((32, 8, 4), (64, 4, 2), (64, 3, 1)):
                net = layers.convolution2d(net, num_outputs=filters, kernel_size=kernel,
                                           stride=stride, activation_fn=tf.nn.relu)
            net = layers.flatten(net)
        with tf.variable_scope("action_value"):
            net = layers.fully_connected(net, num_outputs=512, activation_fn=tf.nn.relu)
            net = layers.fully_connected(net, num_outputs=num_actions, activation_fn=None)
        with tf.variable_scope("action_prob"):
            # Normalize action scores into a policy distribution.
            net = tf.nn.softmax(net)
        return net
def Critic(img_in, scope, reuse=False):
    """Critic network: maps an image to a scalar state value."""
    with tf.variable_scope(scope, reuse=reuse):
        net = img_in
        with tf.variable_scope("convnet"):
            # Nature-DQN convolutional torso.
            for filters, kernel, stride in ((32, 8, 4), (64, 4, 2), (64, 3, 1)):
                net = layers.convolution2d(net, num_outputs=filters, kernel_size=kernel,
                                           stride=stride, activation_fn=tf.nn.relu)
            net = layers.flatten(net)
        with tf.variable_scope("state_value"):
            net = layers.fully_connected(net, num_outputs=512, activation_fn=tf.nn.relu)
            net = layers.fully_connected(net, num_outputs=1, activation_fn=None)
        return net
# models defined in the original code
def discriminator_stego_nn(self, img, batch_size, name):
    """Steganalysis discriminator: four stride-2 5x5 conv layers plus a final
    fully connected logit.

    Args:
        img: input image batch tensor.
        batch_size: number of images in the batch (used to flatten the conv output).
        name: sub-scope name so several discriminators can coexist under 'eve/'.

    Returns:
        A [batch_size, 1] tensor of discriminator outputs (no final activation).
    """
    eve_input = self.image_processing_layer(img)
    eve_conv1 = convolution2d(eve_input, 64, kernel_size = [5, 5], stride = [2,2],
                              activation_fn= tf.nn.relu, normalizer_fn = BatchNorm,
                              scope = 'eve/' + name + '/conv1')
    eve_conv2 = convolution2d(eve_conv1, 64 * 2, kernel_size = [5, 5], stride = [2,2],
                              activation_fn= tf.nn.relu, normalizer_fn = BatchNorm,
                              scope = 'eve/' + name + '/conv2')
    eve_conv3 = convolution2d(eve_conv2, 64 * 4, kernel_size = [5, 5], stride = [2,2],
                              activation_fn= tf.nn.relu, normalizer_fn = BatchNorm,
                              scope = 'eve/' + name + '/conv3')
    eve_conv4 = convolution2d(eve_conv3, 64 * 8, kernel_size = [5, 5], stride = [2,2],
                              activation_fn= tf.nn.relu, normalizer_fn = BatchNorm,
                              scope = 'eve/' + name + '/conv4')
    eve_conv4 = tf.reshape(eve_conv4, [batch_size, -1])
    #eve_fc = fully_connected(eve_conv4, 1, activation_fn = tf.nn.sigmoid, normalizer_fn = BatchNorm,
    #weights_initializer=tf.random_normal_initializer(stddev=1.0))
    # BUG FIX: scope was 'eve' + name + '/final_fc' (missing '/'), which placed
    # the FC variables outside the 'eve/<name>/...' prefix used by every conv
    # layer above.
    eve_fc = fully_connected(eve_conv4, 1, normalizer_fn = BatchNorm,
                             weights_initializer=tf.random_normal_initializer(stddev=1.0),
                             scope = 'eve/' + name + '/final_fc')
    return eve_fc
def model(img_in, num_actions, scope, noisy=False, reuse=False):
    """As described in https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf"""
    with tf.variable_scope(scope, reuse=reuse):
        net = img_in
        with tf.variable_scope("convnet"):
            # Nature-DQN convolutional torso.
            for filters, kernel, stride in ((32, 8, 4), (64, 4, 2), (64, 3, 1)):
                net = layers.convolution2d(net, num_outputs=filters, kernel_size=kernel,
                                           stride=stride, activation_fn=tf.nn.relu)
            net = layers.flatten(net)
        with tf.variable_scope("action_value"):
            if noisy:
                # Noisy networks for exploration: https://arxiv.org/abs/1706.10295
                net = noisy_dense(net, name='noisy_fc1', size=512, activation_fn=tf.nn.relu)
                net = noisy_dense(net, name='noisy_fc2', size=num_actions)
            else:
                net = layers.fully_connected(net, num_outputs=512, activation_fn=tf.nn.relu)
                net = layers.fully_connected(net, num_outputs=num_actions, activation_fn=None)
        return net
def testDynamicOutputSizeWithRateOneValidPadding(self):
    """rate=1 VALID conv on fully dynamic input: static shape stays unknown,
    runtime shape shrinks by kernel-1 per spatial dimension (9->7, 11->9)."""
    num_filters = 32
    input_size = [5, 9, 11, 3]
    expected_size = [None, None, None, num_filters]
    expected_size_dynamic = [5, 7, 9, num_filters]
    with self.test_session():
        images = array_ops.placeholder(np.float32, [None, None, None, input_size[3]])
        output = layers_lib.convolution2d(images, num_filters, [3, 3], rate=1, padding='VALID')
        variables_lib.global_variables_initializer().run()
        self.assertEqual(output.op.name, 'Conv/Relu')
        self.assertListEqual(output.get_shape().as_list(), expected_size)
        eval_output = output.eval({images: np.zeros(input_size, np.float32)})
        self.assertListEqual(list(eval_output.shape), expected_size_dynamic)
def testDynamicOutputSizeWithRateOneValidPaddingNCHW(self):
    """Same as the NHWC rate-1 VALID test but in NCHW layout (GPU only —
    NCHW conv kernels require CUDA)."""
    if test.is_gpu_available(cuda_only=True):
        num_filters = 32
        input_size = [5, 3, 9, 11]
        expected_size = [None, num_filters, None, None]
        expected_size_dynamic = [5, num_filters, 7, 9]
        with self.test_session(use_gpu=True):
            images = array_ops.placeholder(np.float32, [None, input_size[1], None, None])
            output = layers_lib.convolution2d(images, num_filters, [3, 3], rate=1,
                                              padding='VALID', data_format='NCHW')
            variables_lib.global_variables_initializer().run()
            self.assertEqual(output.op.name, 'Conv/Relu')
            self.assertListEqual(output.get_shape().as_list(), expected_size)
            eval_output = output.eval({images: np.zeros(input_size, np.float32)})
            self.assertListEqual(list(eval_output.shape), expected_size_dynamic)
def testDynamicOutputSizeWithRateTwoValidPadding(self):
    """rate=2 (dilated) VALID conv: effective kernel is 5x5, so runtime
    spatial dims shrink by 4 (9->5, 11->7) while static dims stay unknown."""
    num_filters = 32
    input_size = [5, 9, 11, 3]
    expected_size = [None, None, None, num_filters]
    expected_size_dynamic = [5, 5, 7, num_filters]
    with self.test_session():
        images = array_ops.placeholder(np.float32, [None, None, None, input_size[3]])
        output = layers_lib.convolution2d(images, num_filters, [3, 3], rate=2, padding='VALID')
        variables_lib.global_variables_initializer().run()
        self.assertEqual(output.op.name, 'Conv/Relu')
        self.assertListEqual(output.get_shape().as_list(), expected_size)
        eval_output = output.eval({images: np.zeros(input_size, np.float32)})
        self.assertListEqual(list(eval_output.shape), expected_size_dynamic)
def testWithScopeWithoutActivation(self):
    """With activation_fn=None the layer's output op is the BiasAdd (no Relu),
    and the custom scope name appears in the op name."""
    num_filters = 32
    input_size = [5, 9, 11, 3]
    expected_size = [5, 5, 7, num_filters]
    images = random_ops.random_uniform(input_size, seed=1)
    output = layers_lib.convolution2d(images, num_filters, [3, 3], rate=2,
                                      padding='VALID', activation_fn=None, scope='conv7')
    with self.test_session() as sess:
        sess.run(variables_lib.global_variables_initializer())
        self.assertEqual(output.op.name, 'conv7/BiasAdd')
        self.assertListEqual(list(output.eval().shape), expected_size)
def _cnn_to_mlp(convs, hiddens, dueling, inpt, num_actions, scope, reuse=False):
    """Build a CNN torso followed by MLP action-value head(s).

    Args:
        convs: list of (num_outputs, kernel_size, stride) conv-layer specs.
        hiddens: list of hidden-layer sizes for the fully connected head(s).
        dueling: if True, add a separate state-value stream and combine it with
            mean-centered action scores (https://arxiv.org/abs/1511.06581).
        inpt: input tensor.
        num_actions: number of discrete actions (output units).
        scope: variable scope name.
        reuse: whether to reuse existing variables in `scope`.

    Returns:
        A tensor of per-action scores (Q-values).
    """
    with tf.variable_scope(scope, reuse=reuse):
        out = inpt
        with tf.variable_scope("convnet"):
            for num_outputs, kernel_size, stride in convs:
                out = layers.convolution2d(out, num_outputs=num_outputs, kernel_size=kernel_size,
                                           stride=stride, activation_fn=tf.nn.relu)
            out = layers.flatten(out)
        with tf.variable_scope("action_value"):
            action_out = out
            for hidden in hiddens:
                action_out = layers.fully_connected(action_out, num_outputs=hidden,
                                                    activation_fn=tf.nn.relu)
            action_scores = layers.fully_connected(action_out, num_outputs=num_actions,
                                                   activation_fn=None)
        # BUG FIX: an unreachable `return out` used to follow this if/else;
        # both branches already return, so it was dead code and has been removed.
        if dueling:
            with tf.variable_scope("state_value"):
                state_out = out
                for hidden in hiddens:
                    state_out = layers.fully_connected(state_out, num_outputs=hidden,
                                                       activation_fn=tf.nn.relu)
                state_score = layers.fully_connected(state_out, num_outputs=1,
                                                     activation_fn=None)
            # Combine V(s) with mean-centered advantages.
            action_scores_mean = tf.reduce_mean(action_scores, 1)
            action_scores_centered = action_scores - tf.expand_dims(action_scores_mean, 1)
            return state_score + action_scores_centered
        else:
            return action_scores
def register_conv_layer_functions(name, f): explanation = """and the keyword argument `activation_fn` is set to `tf.nn.{0}`.""".format(name) @TensorBuilder.Register1("tf.contrib.layers", name + "_conv2d_layer", wrapped=convolution2d, explanation=explanation) #, _return_type=TensorBuilder) def layer_function(*args, **kwargs): kwargs['activation_fn'] = f return convolution2d(*args, **kwargs)
def atari_model(img_in, num_actions, scope, reuse=False):
    # as described in https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf
    with tf.variable_scope(scope, reuse=reuse):
        net = img_in
        with tf.variable_scope("convnet"):
            # Nature-DQN convolutional torso.
            for filters, kernel, stride in ((32, 8, 4), (64, 4, 2), (64, 3, 1)):
                net = layers.convolution2d(net, num_outputs=filters, kernel_size=kernel,
                                           stride=stride, activation_fn=tf.nn.relu)
            net = layers.flatten(net)
        with tf.variable_scope("action_value"):
            net = layers.fully_connected(net, num_outputs=512, activation_fn=tf.nn.relu)
            net = layers.fully_connected(net, num_outputs=num_actions, activation_fn=None)
        return net
def simple_model(img_in, num_actions, scope, reuse=False, num_filters=64):
    """Single frozen conv layer followed by a linear action-value head."""
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        gauss_initializer = initializers.xavier_initializer(uniform=False)  # stddev = 1/n
        with tf.variable_scope("convnet"):
            out = layers.convolution2d(
                out, num_outputs=num_filters, kernel_size=8, stride=4,
                activation_fn=tf.nn.relu, weights_initializer=gauss_initializer,
                trainable=False)  # conv weights are fixed at their initialization
            out = layers.flatten(out)
        with tf.variable_scope("action_value"):
            out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None)
    return out
def dueling_model(img_in, num_actions, scope, reuse=False, layer_norm=False):
    """As described in https://arxiv.org/abs/1511.06581"""
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        with tf.variable_scope("convnet"):
            # original architecture
            out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
            conv_out = layers.flatten(out)
        with tf.variable_scope("state_value"):
            # Scalar state-value stream V(s); optional layer norm before ReLU.
            state_hidden = layers.fully_connected(conv_out, num_outputs=512, activation_fn=None)
            if layer_norm:
                state_hidden = layer_norm_fn(state_hidden, relu=True)
            else:
                state_hidden = tf.nn.relu(state_hidden)
            state_score = layers.fully_connected(state_hidden, num_outputs=1, activation_fn=None)
        with tf.variable_scope("action_value"):
            # Advantage stream A(s, a); optional layer norm before ReLU.
            actions_hidden = layers.fully_connected(conv_out, num_outputs=512, activation_fn=None)
            if layer_norm:
                actions_hidden = layer_norm_fn(actions_hidden, relu=True)
            else:
                actions_hidden = tf.nn.relu(actions_hidden)
            action_scores = layers.fully_connected(actions_hidden, num_outputs=num_actions, activation_fn=None)
            # Center advantages by their mean over actions.
            action_scores_mean = tf.reduce_mean(action_scores, 1)
            action_scores = action_scores - tf.expand_dims(action_scores_mean, 1)
        return state_score + action_scores
def _cnn_to_mlp(convs, hiddens, dueling, inpt, num_actions, scope, reuse=False, layer_norm=False):
    """CNN torso + MLP head(s); optional dueling streams and layer norm.

    `convs` is a list of (num_outputs, kernel_size, stride) conv specs and
    `hiddens` the fully connected layer sizes. Returns per-action Q-values.
    """
    with tf.variable_scope(scope, reuse=reuse):
        out = inpt
        with tf.variable_scope("convnet"):
            for num_outputs, kernel_size, stride in convs:
                out = layers.convolution2d(out, num_outputs=num_outputs, kernel_size=kernel_size, stride=stride, activation_fn=tf.nn.relu)
            conv_out = layers.flatten(out)
        with tf.variable_scope("action_value"):
            action_out = conv_out
            for hidden in hiddens:
                # Linear layer, then optional layer norm, then ReLU.
                action_out = layers.fully_connected(action_out, num_outputs=hidden, activation_fn=None)
                if layer_norm:
                    action_out = layers.layer_norm(action_out, center=True, scale=True)
                action_out = tf.nn.relu(action_out)
            action_scores = layers.fully_connected(action_out, num_outputs=num_actions, activation_fn=None)
        if dueling:
            with tf.variable_scope("state_value"):
                state_out = conv_out
                for hidden in hiddens:
                    state_out = layers.fully_connected(state_out, num_outputs=hidden, activation_fn=None)
                    if layer_norm:
                        state_out = layers.layer_norm(state_out, center=True, scale=True)
                    state_out = tf.nn.relu(state_out)
                state_score = layers.fully_connected(state_out, num_outputs=1, activation_fn=None)
            # Combine V(s) with mean-centered advantages (dueling DQN).
            action_scores_mean = tf.reduce_mean(action_scores, 1)
            action_scores_centered = action_scores - tf.expand_dims(action_scores_mean, 1)
            q_out = state_score + action_scores_centered
        else:
            q_out = action_scores
        return q_out
def model(H, x, training): net = dropout(x, 0.5, is_training = training) # net = conv2d(net, 64, [3, 3], activation_fn = tf.nn.relu) # net = conv2d(net, 64, [3, 3], activation_fn = tf.nn.relu) # net = max_pool2d(net, [2, 2], padding = 'VALID') # net = conv2d(net, 128, [3, 3], activation_fn = tf.nn.relu) # net = conv2d(net, 128, [3, 3], activation_fn = tf.nn.relu) # net = max_pool2d(net, [2, 2], padding = 'VALID') # ksize = net.get_shape().as_list() # net = max_pool2d(net, [ksize[1], ksize[2]]) net = fully_connected(flatten(net), 256, activation_fn = tf.nn.relu) net = dropout(net, 0.5, is_training = training) logits = fully_connected(net, 1, activation_fn = tf.nn.sigmoid) preds = tf.cast(tf.greater(logits, 0.5), tf.int64) return logits, preds
def get_network(self, input_tensor, is_training):
    """Build InceptionResnetV2 with a modified auxiliary scoring/upsampling head.

    Returns (net, feature): the smoothed upsampled heatmap and the raw
    scoring-layer activations.
    """
    # Load pre-trained inception-resnet model
    with slim.arg_scope(inception_resnet_v2_arg_scope(batch_norm_decay = 0.999, weight_decay = 0.0001)):
        net, end_points = inception_resnet_v2(input_tensor, is_training = is_training)
    # Adding some modification to original InceptionResnetV2 - changing scoring of AUXILIARY TOWER
    weight_decay = 0.0005
    with tf.variable_scope('NewInceptionResnetV2'):
        with tf.variable_scope('AuxiliaryScoring'):
            with slim.arg_scope([layers.convolution2d, layers.convolution2d_transpose],
                                weights_regularizer = slim.l2_regularizer(weight_decay),
                                biases_regularizer = slim.l2_regularizer(weight_decay),
                                activation_fn = None):
                tf.summary.histogram('Last_layer/activations', net, [KEY_SUMMARIES])
                # Scoring: 1x1 conv down to self.FEATURES channels.
                net = slim.dropout(net, 0.7, is_training = is_training, scope = 'Dropout')
                net = layers.convolution2d(net, num_outputs = self.FEATURES, kernel_size = 1,
                                           stride = 1, scope = 'Scoring_layer')
                feature = net
                tf.summary.histogram('Scoring_layer/activations', net, [KEY_SUMMARIES])
                # Upsampling: transpose conv with kernel == stride == 17 (non-overlapping).
                net = layers.convolution2d_transpose(net, num_outputs = 16, kernel_size = 17,
                                                     stride = 17, padding = 'VALID',
                                                     scope = 'Upsampling_layer')
                tf.summary.histogram('Upsampling_layer/activations', net, [KEY_SUMMARIES])
        # Smoothing layer - separable gaussian filters
        net = super()._get_gauss_smoothing_net(net, size = self.SMOOTH_SIZE, std = 1.0, kernel_sum = 0.2)
        return net, feature
def residual_block(net, ch = 256, ch_inner = 128, scope = None, reuse = None, stride = 1):
    """ Bottleneck v2 """
    # All convs in this block default to linear (no activation/normalizer);
    # batch norm + ReLU are applied explicitly before each conv (pre-activation).
    with slim.arg_scope([layers.convolution2d], activation_fn = None, normalizer_fn = None):
        with tf.variable_scope(scope, 'ResidualBlock', reuse = reuse):
            in_net = net
            if stride > 1:
                # Strided 1x1 projection so the shortcut matches the reduced spatial size.
                net = layers.convolution2d(net, ch, kernel_size = 1, stride = stride)
            # Residual path: BN -> ReLU -> 1x1 reduce, BN -> ReLU -> 3x3 (strided),
            # BN -> ReLU -> 1x1 expand back to `ch` channels.
            in_net = layers.batch_norm(in_net)
            in_net = tf.nn.relu(in_net)
            in_net = layers.convolution2d(in_net, ch_inner, 1)
            in_net = layers.batch_norm(in_net)
            in_net = tf.nn.relu(in_net)
            in_net = layers.convolution2d(in_net, ch_inner, 3, stride = stride)
            in_net = layers.batch_norm(in_net)
            in_net = tf.nn.relu(in_net)
            in_net = layers.convolution2d(in_net, ch, 1, activation_fn = None)
            # Merge shortcut and residual, then a final ReLU (NOTE(review):
            # canonical pre-activation ResNets omit this trailing ReLU).
            net = tf.nn.relu(in_net + net)
    return net
def conv1d(self, net, num_ker, ker_size, stride):
    # 1D-convolution implemented as a 2D conv with a width-1 kernel and stride.
    net = convolution2d(
        net,
        num_outputs=num_ker,
        kernel_size=[ker_size, 1],
        stride=[stride, 1],
        padding='SAME',
        activation_fn=None,
        normalizer_fn=None,
        weights_initializer=variance_scaling_initializer(),
        weights_regularizer=l2_regularizer(self.weight_decay),
        # NOTE(review): tf.zeros_initializer is passed uncalled; recent TF 1.x
        # expects an instance, tf.zeros_initializer() — confirm the TF version.
        biases_initializer=tf.zeros_initializer)
    return net
def conv2d(self, net, num_ker, ker_size, stride):
    # Square 2D convolution with SAME padding and no activation/normalizer;
    # weight decay comes from the global FLAGS (unlike conv1d, which uses self).
    net = convolution2d(
        net,
        num_outputs=num_ker,
        kernel_size=[ker_size, ker_size],
        stride=[stride, stride],
        padding='SAME',
        activation_fn=None,
        normalizer_fn=None,
        weights_initializer=variance_scaling_initializer(),
        weights_regularizer=l2_regularizer(FLAGS.weight_decay),
        # NOTE(review): tf.zeros_initializer is passed uncalled; recent TF 1.x
        # expects an instance, tf.zeros_initializer() — confirm the TF version.
        biases_initializer=tf.zeros_initializer)
    return net
def forward(image, num_actions):
    """Shared conv/FC torso with an actor head (action log-probs) and a
    critic head (scalar state value)."""
    net = layers.convolution2d(image, num_outputs=16, kernel_size=8, stride=4,
                               activation_fn=tf.nn.relu, scope='conv1')
    net = layers.convolution2d(net, num_outputs=32, kernel_size=4, stride=2,
                               activation_fn=tf.nn.relu, scope='conv2')
    net = layers.flatten(net, scope='flatten')
    net = layers.fully_connected(net, num_outputs=256, activation_fn=tf.nn.relu, scope='fc1')
    # Actor head: log-probabilities over the discrete action set.
    actor_logits = layers.fully_connected(net, num_outputs=num_actions,
                                          activation_fn=None, scope='fc_actor')
    action_logprobs = tf.nn.log_softmax(actor_logits)
    # Critic head: scalar value per state, flattened to shape [batch].
    value = layers.fully_connected(net, num_outputs=1, activation_fn=None, scope='fc_critic')
    value = tf.reshape(value, [-1])
    return action_logprobs, value
def dueling_model(img_in, num_actions, scope, noisy=False, reuse=False):
    """As described in https://arxiv.org/abs/1511.06581"""
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        with tf.variable_scope("convnet"):
            # original architecture
            out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
            out = layers.flatten(out)
        with tf.variable_scope("state_value"):
            if noisy:
                # Apply noisy network on fully connected layers
                # ref: https://arxiv.org/abs/1706.10295
                state_hidden = noisy_dense(out, name='noisy_fc1', size=512, activation_fn=tf.nn.relu)
                state_score = noisy_dense(state_hidden, name='noisy_fc2', size=1)
            else:
                state_hidden = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
                state_score = layers.fully_connected(state_hidden, num_outputs=1, activation_fn=None)
        with tf.variable_scope("action_value"):
            if noisy:
                # Apply noisy network on fully connected layers
                # ref: https://arxiv.org/abs/1706.10295
                actions_hidden = noisy_dense(out, name='noisy_fc1', size=512, activation_fn=tf.nn.relu)
                action_scores = noisy_dense(actions_hidden, name='noisy_fc2', size=num_actions)
            else:
                actions_hidden = layers.fully_connected(out, num_outputs=512, activation_fn=tf.nn.relu)
                action_scores = layers.fully_connected(actions_hidden, num_outputs=num_actions, activation_fn=None)
            # Center advantages by their mean over actions before combining with V(s).
            action_scores_mean = tf.reduce_mean(action_scores, 1)
            action_scores = action_scores - tf.expand_dims(action_scores_mean, 1)
        return state_score + action_scores
def policy_and_value_network(observations):
    """Small A2C-style network: two convs + FC(128), with linear policy logits
    and a linear value (baseline) head.

    NOTE(review): `env` is a global defined elsewhere; `env.actions` supplies
    the number of discrete actions.
    """
    # TODO: Baseline network, used in (Mnih et al., 2016)
    conv = tf_layers.convolution2d(observations, 16, 8, 4)
    conv = tf_layers.convolution2d(conv, 32, 4, 2)
    conv = tf_layers.flatten(conv)
    hidden_layer = tf_layers.fully_connected(conv, 128, activation_fn=tf.nn.relu)
    logits = tf_layers.linear(hidden_layer, env.actions)
    value = tf_layers.linear(hidden_layer, 1)
    # TODO: If you do not want to use baseline, uncomment the next line
    # value = tf.zeros([tf.shape(observations)[0], 1])
    return logits, value
def conv_learn(X, y, mode):
    """tf.contrib.learn model function: tiny conv net for 36x36 grayscale
    images with 5 classes. Returns (predictions, loss, train_op)."""
    # Ensure our images are 2d
    X = tf.reshape(X, [-1, 36, 36, 1])
    # We'll need these in one-hot format
    y = tf.one_hot(tf.cast(y, tf.int32), 5, 1, 0)
    # conv layer will compute 4 kernels for each 5x5 patch
    with tf.variable_scope('conv_layer'):
        # 5x5 convolution, pad with zeros on edges
        h1 = layers.convolution2d(X, num_outputs=4, kernel_size=[5, 5], activation_fn=tf.nn.relu)
        # 2x2 Max pooling, no padding on edges
        p1 = tf.nn.max_pool(h1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
        # Need to flatten conv output for use in dense layer
        p1_size = np.product([s.value for s in p1.get_shape()[1:]])
        p1f = tf.reshape(p1, [-1, p1_size])
    # densely connected layer with 5 neurons and dropout
    h_fc1 = layers.fully_connected(p1f, 5, activation_fn=tf.nn.relu)
    drop = layers.dropout(h_fc1, keep_prob=0.5, is_training=mode == tf.contrib.learn.ModeKeys.TRAIN)
    logits = layers.fully_connected(drop, 5, activation_fn=None)
    loss = tf.losses.softmax_cross_entropy(y, logits)
    # Setup the training function manually
    train_op = layers.optimize_loss(
        loss,
        tf.contrib.framework.get_global_step(),
        optimizer='Adam',
        learning_rate=0.01)
    return tf.argmax(logits, 1), loss, train_op
# Use generic estimator with our function
def testInvalidDataFormat(self):
    """An unsupported data_format string raises a ValueError mentioning it."""
    height, width = 7, 9
    with self.test_session():
        images = random_ops.random_uniform((5, height, width, 3), seed=1)
        with self.assertRaisesRegexp(ValueError, 'data_format'):
            layers_lib.convolution2d(images, 32, 3, data_format='CHWN')
def testCreateConv(self):
    """Default conv2d: SAME padding keeps spatial size; weights/biases have
    the expected shapes and live under the default 'Conv' scope."""
    height, width = 7, 9
    with self.test_session():
        images = np.random.uniform(size=(5, height, width, 4))
        output = layers_lib.convolution2d(images, 32, [3, 3])
        self.assertEqual(output.op.name, 'Conv/Relu')
        self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32])
        weights = variables.get_variables_by_name('weights')[0]
        self.assertListEqual(weights.get_shape().as_list(), [3, 3, 4, 32])
        biases = variables.get_variables_by_name('biases')[0]
        self.assertListEqual(biases.get_shape().as_list(), [32])
def testCreateConvNCHW(self):
    """NCHW conv2d: output channels appear in dim 1; weight shape is still
    [kh, kw, in_ch, out_ch] regardless of data format."""
    height, width = 7, 9
    with self.test_session():
        images = np.random.uniform(size=(5, 4, height, width))
        output = layers_lib.convolution2d(images, 32, [3, 3], data_format='NCHW')
        self.assertEqual(output.op.name, 'Conv/Relu')
        self.assertListEqual(output.get_shape().as_list(), [5, 32, height, width])
        weights = variables.get_variables_by_name('weights')[0]
        self.assertListEqual(weights.get_shape().as_list(), [3, 3, 4, 32])
        biases = variables.get_variables_by_name('biases')[0]
        self.assertListEqual(biases.get_shape().as_list(), [32])
def testCreateSquareConv(self):
    """A scalar kernel_size (3) is interpreted as a square 3x3 kernel."""
    height, width = 7, 9
    with self.test_session():
        images = random_ops.random_uniform((5, height, width, 3), seed=1)
        output = layers_lib.convolution2d(images, 32, 3)
        self.assertEqual(output.op.name, 'Conv/Relu')
        self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32])
def testCreateConvWithTensorShape(self):
    """kernel_size may be given as a TensorShape slice (the input's own
    spatial dims here) and still builds a valid SAME-padded conv."""
    height, width = 7, 9
    with self.test_session():
        images = random_ops.random_uniform((5, height, width, 3), seed=1)
        output = layers_lib.convolution2d(images, 32, images.get_shape()[1:3])
        self.assertEqual(output.op.name, 'Conv/Relu')
        self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32])
def testFullyConvWithCustomGetter(self):
    """A custom variable getter installed on the enclosing scope is invoked
    for each of the layer's two variables (weights and biases)."""
    height, width = 7, 9
    with self.test_session():
        called = [0]  # mutable cell so the closure can count invocations

        def custom_getter(getter, *args, **kwargs):
            called[0] += 1
            return getter(*args, **kwargs)

        with variable_scope.variable_scope('test', custom_getter=custom_getter):
            images = random_ops.random_uniform((5, height, width, 32), seed=1)
            layers_lib.convolution2d(images, 64, images.get_shape()[1:3])
        self.assertEqual(called[0], 2)  # Custom getter called twice.
def testCreateVerticalConv(self):
    """A [3, 1] kernel builds a vertical (column) convolution; weight shape
    reflects the asymmetric kernel."""
    height, width = 7, 9
    with self.test_session():
        images = random_ops.random_uniform((5, height, width, 4), seed=1)
        output = layers_lib.convolution2d(images, 32, [3, 1])
        self.assertEqual(output.op.name, 'Conv/Relu')
        self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32])
        weights = variables.get_variables_by_name('weights')[0]
        self.assertListEqual(weights.get_shape().as_list(), [3, 1, 4, 32])
        biases = variables.get_variables_by_name('biases')[0]
        self.assertListEqual(biases.get_shape().as_list(), [32])
def testCreateHorizontalConv(self):
    """A [1, 3] kernel builds a horizontal (row) convolution; weight shape
    reflects the asymmetric kernel."""
    height, width = 7, 9
    with self.test_session():
        images = random_ops.random_uniform((5, height, width, 4), seed=1)
        output = layers_lib.convolution2d(images, 32, [1, 3])
        self.assertEqual(output.op.name, 'Conv/Relu')
        self.assertListEqual(output.get_shape().as_list(), [5, height, width, 32])
        weights = variables.get_variables_by_name('weights')[0]
        self.assertListEqual(weights.get_shape().as_list(), [1, 3, 4, 32])
def testCreateConvWithStride(self):
    """Stride-2 conv with SAME padding halves each spatial dimension."""
    height, width = 6, 8
    with self.test_session():
        images = random_ops.random_uniform((5, height, width, 3), seed=1)
        output = layers_lib.convolution2d(images, 32, [3, 3], stride=2)
        self.assertEqual(output.op.name, 'Conv/Relu')
        # BUG FIX: use integer division — under Python 3, height / 2 is a
        # float, putting floats into the expected shape list.
        self.assertListEqual(output.get_shape().as_list(),
                             [5, height // 2, width // 2, 32])
def testCreateConvCreatesWeightsAndBiasesVars(self):
    """Building a conv2d under scope 'conv1' creates conv1/weights and
    conv1/biases variables that did not exist before."""
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 3), seed=1)
    with self.test_session():
        self.assertFalse(variables.get_variables('conv1/weights'))
        self.assertFalse(variables.get_variables('conv1/biases'))
        layers_lib.convolution2d(images, 32, [3, 3], scope='conv1')
        self.assertTrue(variables.get_variables('conv1/weights'))
        self.assertTrue(variables.get_variables('conv1/biases'))
def testCreateConvWithCollection(self):
    """outputs_collections adds the layer output to the named collection,
    aliased by its full name-scope path ('fe/Conv')."""
    height, width = 7, 9
    images = random_ops.random_uniform((5, height, width, 3), seed=1)
    with ops.name_scope('fe'):
        conv = layers_lib.convolution2d(
            images, 32, [3, 3], outputs_collections='outputs', scope='Conv')
    output_collected = ops.get_collection('outputs')[0]
    self.assertEqual(output_collected.aliases, ['fe/Conv'])
    self.assertEqual(output_collected, conv)