The following 20 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.contrib.layers.max_pool2d().
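Before the project examples, here is a minimal sketch of the call itself (TF 1.x, tf.contrib): besides the input, kernel_size is the only required argument, while stride defaults to 2 and padding to 'VALID'. The placeholder name and shapes below are illustrative assumptions, not taken from any of the projects that follow.

import tensorflow as tf
from tensorflow.contrib import layers

# Illustrative NHWC input; the shape is an assumption for this sketch only.
images = tf.placeholder(tf.float32, [None, 224, 224, 3])

# 2x2 window with the defaults stride=2 and padding='VALID' -> 112 x 112 x 3.
pooled = layers.max_pool2d(images, [2, 2])

# Explicit stride, padding and scope.
pooled_same = layers.max_pool2d(images, [3, 3], stride=2, padding='SAME', scope='pool1')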
def image_processing_layers(self) -> List[tf.Tensor]:
    """Do all convolutions and return the last convolutional map.

    Applies convolutions on the input tensor with optional max pooling.
    All the intermediate layers are stored in the `image_processing_layers`
    attribute. There is no dropout between the convolutional layers; by
    default the activation function is ReLU.
    """
    last_layer = self.image_input
    image_processing_layers = []  # type: List[tf.Tensor]

    with tf.variable_scope("convolutions"):
        for i, (filter_size, n_filters, pool_size) in enumerate(self.convolutions):
            with tf.variable_scope("cnn_layer_{}".format(i)):
                last_layer = conv2d(last_layer, n_filters, filter_size)
                image_processing_layers.append(last_layer)

                if pool_size:
                    last_layer = max_pool2d(last_layer, pool_size)
                    image_processing_layers.append(last_layer)

    return image_processing_layers
def get_inception_layer(inputs, conv11_size, conv33_11_size, conv33_size,
                        conv55_11_size, conv55_size, pool11_size):
    with tf.variable_scope("conv_1x1"):
        conv11 = layers.conv2d(inputs, conv11_size, [1, 1])
    with tf.variable_scope("conv_3x3"):
        conv33_11 = layers.conv2d(inputs, conv33_11_size, [1, 1])
        conv33 = layers.conv2d(conv33_11, conv33_size, [3, 3])
    with tf.variable_scope("conv_5x5"):
        conv55_11 = layers.conv2d(inputs, conv55_11_size, [1, 1])
        conv55 = layers.conv2d(conv55_11, conv55_size, [5, 5])
    with tf.variable_scope("pool_proj"):
        pool_proj = layers.max_pool2d(inputs, [3, 3], stride=1)
        pool11 = layers.conv2d(pool_proj, pool11_size, [1, 1])
    if tf.__version__ == '0.11.0rc0':
        return tf.concat(3, [conv11, conv33, conv55, pool11])
    return tf.concat([conv11, conv33, conv55, pool11], 3)
def _block_b_reduce(net, endpoints, scope='BlockReduceB'):
    # 17 x 17 -> 8 x 8 reduce
    with arg_scope([layers.conv2d, layers.max_pool2d, layers.avg_pool2d], padding='VALID'):
        with tf.variable_scope(scope):
            with tf.variable_scope('Br1_Pool'):
                br1 = layers.max_pool2d(net, [3, 3], stride=2, scope='Pool1_3x3/2')
            with tf.variable_scope('Br2_3x3'):
                br2 = layers.conv2d(net, 192, [1, 1], padding='SAME', scope='Conv1_1x1')
                br2 = layers.conv2d(br2, 192, [3, 3], stride=2, scope='Conv2_3x3/2')
            with tf.variable_scope('Br3_7x7x3'):
                br3 = layers.conv2d(net, 256, [1, 1], padding='SAME', scope='Conv1_1x1')
                br3 = layers.conv2d(br3, 256, [1, 7], padding='SAME', scope='Conv2_1x7')
                br3 = layers.conv2d(br3, 320, [7, 1], padding='SAME', scope='Conv3_7x1')
                br3 = layers.conv2d(br3, 320, [3, 3], stride=2, scope='Conv4_3x3/2')
            net = tf.concat(3, [br1, br2, br3], name='Concat1')
            endpoints[scope] = net
            print('%s output shape: %s' % (scope, net.get_shape()))
    return net
def __call__(self, inputs, reuse=True):
    with tf.variable_scope(self.name) as vs:
        # tf.get_variable_scope()
        if reuse:
            vs.reuse_variables()

        x = tcl.conv2d(inputs, num_outputs=64, kernel_size=(4, 4), stride=(1, 1), padding='SAME')
        x = tcl.batch_norm(x)
        x = tf.nn.relu(x)
        x = tcl.max_pool2d(x, (2, 2), (2, 2), 'SAME')

        x = tcl.conv2d(x, num_outputs=128, kernel_size=(4, 4), stride=(1, 1), padding='SAME')
        x = tcl.batch_norm(x)
        x = tf.nn.relu(x)
        x = tcl.max_pool2d(x, (2, 2), (2, 2), 'SAME')

        x = tcl.flatten(x)
        logits = tcl.fully_connected(x, num_outputs=self.num_output)
        return logits
def __call__(self, inputs, reuse=True):
    with tf.variable_scope(self.name) as vs:
        tf.get_variable_scope()
        if reuse:
            vs.reuse_variables()

        conv1 = tcl.conv2d(inputs, num_outputs=64, kernel_size=(7, 7), stride=(2, 2), padding='SAME')
        conv1 = tcl.batch_norm(conv1)
        conv1 = tf.nn.relu(conv1)
        conv1 = tcl.max_pool2d(conv1, kernel_size=(3, 3), stride=(2, 2), padding='SAME')

        x = conv1
        filters = 64
        first_layer = True
        for i, r in enumerate(self.repetitions):
            x = _residual_block(self.block_fn, filters=filters,
                                repetition=r, is_first_layer=first_layer)(x)
            filters *= 2
            if first_layer:
                first_layer = False

        _, h, w, ch = x.shape.as_list()
        outputs = tcl.avg_pool2d(x, kernel_size=(h, w), stride=(1, 1))
        outputs = tcl.flatten(outputs)
        logits = tcl.fully_connected(outputs, num_outputs=self.num_output, activation_fn=None)
        return logits
def down_block(block_fn, filters):
    def f(inputs):
        x = block_fn(filters)(inputs)
        x = block_fn(filters)(x)
        down = tcl.max_pool2d(x, kernel_size=(3, 3), stride=(2, 2), padding='SAME')
        return x, down  # x: same size as inputs, down: downscaled
    return f
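A hypothetical sketch (not from the original project) of how a down_block like the one above could be chained in a U-Net-style encoder; conv_block, the placeholder, and the shapes are assumptions for illustration only.

import tensorflow as tf
import tensorflow.contrib.layers as tcl

def conv_block(filters):
    # Assumed block_fn: a single 3x3 convolution that keeps the spatial size.
    def f(x):
        return tcl.conv2d(x, num_outputs=filters, kernel_size=(3, 3), padding='SAME')
    return f

inputs = tf.placeholder(tf.float32, [None, 128, 128, 3])
skip1, down1 = down_block(conv_block, 64)(inputs)   # skip1: 128x128x64, down1: 64x64x64
skip2, down2 = down_block(conv_block, 128)(down1)   # skip2: 64x64x128, down2: 32x32x128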
def model(H, x, training):
    net = dropout(x, 0.5, is_training=training)
    # net = conv2d(net, 64, [3, 3], activation_fn=tf.nn.relu)
    # net = conv2d(net, 64, [3, 3], activation_fn=tf.nn.relu)
    # net = max_pool2d(net, [2, 2], padding='VALID')
    # net = conv2d(net, 128, [3, 3], activation_fn=tf.nn.relu)
    # net = conv2d(net, 128, [3, 3], activation_fn=tf.nn.relu)
    # net = max_pool2d(net, [2, 2], padding='VALID')
    # ksize = net.get_shape().as_list()
    # net = max_pool2d(net, [ksize[1], ksize[2]])
    net = fully_connected(flatten(net), 256, activation_fn=tf.nn.relu)
    net = dropout(net, 0.5, is_training=training)
    logits = fully_connected(net, 1, activation_fn=tf.nn.sigmoid)
    preds = tf.cast(tf.greater(logits, 0.5), tf.int64)
    return logits, preds
def vgg_16_fn(input_tensor: tf.Tensor, scope='vgg_16', blocks=5, weight_decay=0.0005) \
        -> (tf.Tensor, list):  # list of tf.Tensors (layers)
    intermediate_levels = []
    # intermediate_levels.append(input_tensor)
    with slim.arg_scope(nets.vgg.vgg_arg_scope(weight_decay=weight_decay)):
        with tf.variable_scope(scope, 'vgg_16', [input_tensor]) as sc:
            input_tensor = mean_substraction(input_tensor)
            end_points_collection = sc.original_name_scope + '_end_points'
            # Collect outputs for conv2d, fully_connected and max_pool2d.
            with slim.arg_scope(
                    [layers.conv2d, layers.fully_connected, layers.max_pool2d],
                    outputs_collections=end_points_collection):
                net = layers.repeat(
                    input_tensor, 2, layers.conv2d, 64, [3, 3], scope='conv1')
                intermediate_levels.append(net)
                net = layers.max_pool2d(net, [2, 2], scope='pool1')
                if blocks >= 2:
                    net = layers.repeat(net, 2, layers.conv2d, 128, [3, 3], scope='conv2')
                    intermediate_levels.append(net)
                    net = layers.max_pool2d(net, [2, 2], scope='pool2')
                if blocks >= 3:
                    net = layers.repeat(net, 3, layers.conv2d, 256, [3, 3], scope='conv3')
                    intermediate_levels.append(net)
                    net = layers.max_pool2d(net, [2, 2], scope='pool3')
                if blocks >= 4:
                    net = layers.repeat(net, 3, layers.conv2d, 512, [3, 3], scope='conv4')
                    intermediate_levels.append(net)
                    net = layers.max_pool2d(net, [2, 2], scope='pool4')
                if blocks >= 5:
                    net = layers.repeat(net, 3, layers.conv2d, 512, [3, 3], scope='conv5')
                    intermediate_levels.append(net)
                    net = layers.max_pool2d(net, [2, 2], scope='pool5')

                return net, intermediate_levels
def _stem(inputs, endpoints, num_filters=64):
    with tf.variable_scope('Stem'):
        net = layers.conv2d(inputs, num_filters, [7, 7], stride=2, scope='Conv1_7x7')
        net = layers.max_pool2d(net, [3, 3], stride=2, scope='Pool1_3x3')
        endpoints['Stem'] = net
        print("Stem output size: ", net.get_shape())
    return net


#@add_arg_scope
def _block_a(net, endpoints, d=64, scope='BlockA'):
    with tf.variable_scope(scope):
        net = endpoints[scope+'/Conv1'] = layers.conv2d(net, d, [3, 3], scope='Conv1_3x3')
        net = endpoints[scope+'/Conv2'] = layers.conv2d(net, d, [3, 3], scope='Conv2_3x3')
        net = endpoints[scope+'/Pool1'] = layers.max_pool2d(net, [2, 2], stride=2, scope='Pool1_2x2/2')
    return net
def _block_b(net, endpoints, d=256, scope='BlockB'):
    with tf.variable_scope(scope):
        net = endpoints[scope+'/Conv1'] = layers.conv2d(net, d, [3, 3], scope='Conv1_3x3')
        net = endpoints[scope+'/Conv2'] = layers.conv2d(net, d, [3, 3], scope='Conv2_3x3')
        net = endpoints[scope+'/Conv3'] = layers.conv2d(net, d, [3, 3], scope='Conv3_3x3')
        net = endpoints[scope+'/Pool1'] = layers.max_pool2d(net, [2, 2], stride=2, scope='Pool1_2x2/2')
    return net
def _block_c(net, endpoints, d=256, scope='BlockC'):
    with tf.variable_scope(scope):
        net = endpoints[scope+'/Conv1'] = layers.conv2d(net, d, [3, 3], scope='Conv1_3x3')
        net = endpoints[scope+'/Conv2'] = layers.conv2d(net, d, [3, 3], scope='Conv2_3x3')
        net = endpoints[scope+'/Conv3'] = layers.conv2d(net, d, [3, 3], scope='Conv3_3x3')
        net = endpoints[scope+'/Conv4'] = layers.conv2d(net, d, [3, 3], scope='Conv4_3x3')
        net = endpoints[scope+'/Pool1'] = layers.max_pool2d(net, [2, 2], stride=2, scope='Pool1_2x2/2')
    return net
def _build_vgg16(
        inputs,
        num_classes=1000,
        dropout_keep_prob=0.5,
        is_training=True,
        scope=''):
    """Blah"""

    endpoints = {}
    with tf.name_scope(scope, 'vgg16', [inputs]):
        with arg_scope(
                [layers.batch_norm, layers.dropout], is_training=is_training):
            with arg_scope(
                    [layers.conv2d, layers.max_pool2d],
                    stride=1,
                    padding='SAME'):

                net = _block_a(inputs, endpoints, d=64, scope='Scale1')
                net = _block_a(net, endpoints, d=128, scope='Scale2')
                net = _block_b(net, endpoints, d=256, scope='Scale3')
                net = _block_b(net, endpoints, d=512, scope='Scale4')
                net = _block_b(net, endpoints, d=512, scope='Scale5')
                logits = _block_output(net, endpoints, num_classes, dropout_keep_prob)

                endpoints['Predictions'] = tf.nn.softmax(logits, name='Predictions')
                return logits, endpoints
def _block_a_reduce(net, endpoints, k=192, l=224, m=256, n=384, scope='BlockReduceA'):
    # 35 x 35 -> 17 x 17 reduce
    # inception-v4: k=192, l=224, m=256, n=384
    # inception-resnet-v1: k=192, l=192, m=256, n=384
    # inception-resnet-v2: k=256, l=256, m=384, n=384
    # default padding = VALID
    # default stride = 1
    with arg_scope([layers.conv2d, layers.max_pool2d, layers.avg_pool2d], padding='VALID'):
        with tf.variable_scope(scope):
            with tf.variable_scope('Br1_Pool'):
                br1 = layers.max_pool2d(net, [3, 3], stride=2, scope='Pool1_3x3/2')
                # 17 x 17 x input
            with tf.variable_scope('Br2_3x3'):
                br2 = layers.conv2d(net, n, [3, 3], stride=2, scope='Conv1_3x3/2')
                # 17 x 17 x n
            with tf.variable_scope('Br3_3x3Dbl'):
                br3 = layers.conv2d(net, k, [1, 1], padding='SAME', scope='Conv1_1x1')
                br3 = layers.conv2d(br3, l, [3, 3], padding='SAME', scope='Conv2_3x3')
                br3 = layers.conv2d(br3, m, [3, 3], stride=2, scope='Conv3_3x3/2')
                # 17 x 17 x m
            net = tf.concat(3, [br1, br2, br3], name='Concat1')
            # 17 x 17 x input + n + m
            # 1024 for v4 (384 + 384 + 256)
            # 896 for res-v1 (256 + 384 + 256)
            # 1152 for res-v2 (384 + 384 + 384)
            endpoints[scope] = net
            print('%s output shape: %s' % (scope, net.get_shape()))
    return net
def _block_stem_res(net, endpoints, scope='Stem'):
    # Simpler _stem for inception-resnet-v1 network
    # NOTE observe endpoints of first 3 layers
    # default padding = VALID
    # default stride = 1
    with arg_scope([layers.conv2d, layers.max_pool2d, layers.avg_pool2d], padding='VALID'):
        with tf.variable_scope(scope):
            # 299 x 299 x 3
            net = layers.conv2d(net, 32, [3, 3], stride=2, scope='Conv1_3x3/2')
            endpoints[scope + '/Conv1'] = net
            # 149 x 149 x 32
            net = layers.conv2d(net, 32, [3, 3], scope='Conv2_3x3')
            endpoints[scope + '/Conv2'] = net
            # 147 x 147 x 32
            net = layers.conv2d(net, 64, [3, 3], padding='SAME', scope='Conv3_3x3')
            endpoints[scope + '/Conv3'] = net
            # 147 x 147 x 64
            net = layers.max_pool2d(net, [3, 3], stride=2, scope='Pool1_3x3/2')
            # 73 x 73 x 64
            net = layers.conv2d(net, 80, [1, 1], padding='SAME', scope='Conv4_1x1')
            # 73 x 73 x 80
            net = layers.conv2d(net, 192, [3, 3], scope='Conv5_3x3')
            # 71 x 71 x 192
            net = layers.conv2d(net, 256, [3, 3], stride=2, scope='Conv6_3x3/2')
            # 35 x 35 x 256
            endpoints[scope] = net
            print('%s output shape: %s' % (scope, net.get_shape()))
    return net
def _block_b_reduce_res(net, endpoints, ver=2, scope='BlockReduceB'):
    # 17 x 17 -> 8 x 8 reduce
    # configure branch filter numbers
    br3_num = 256
    br4_num = 256
    if ver == 1:
        br3_inc = 0
        br4_inc = 0
    else:
        br3_inc = 32
        br4_inc = 32
    with arg_scope([layers.conv2d, layers.max_pool2d, layers.avg_pool2d], padding='VALID'):
        with tf.variable_scope(scope):
            with tf.variable_scope('Br1_Pool'):
                br1 = layers.max_pool2d(net, [3, 3], stride=2, scope='Pool1_3x3/2')
            with tf.variable_scope('Br2_3x3'):
                br2 = layers.conv2d(net, 256, [1, 1], padding='SAME', scope='Conv1_1x1')
                br2 = layers.conv2d(br2, 384, [3, 3], stride=2, scope='Conv2_3x3/2')
            with tf.variable_scope('Br3_3x3'):
                br3 = layers.conv2d(net, br3_num, [1, 1], padding='SAME', scope='Conv1_1x1')
                br3 = layers.conv2d(br3, br3_num + br3_inc, [3, 3], stride=2, scope='Conv2_3x3/2')
            with tf.variable_scope('Br4_3x3Dbl'):
                br4 = layers.conv2d(net, br4_num, [1, 1], padding='SAME', scope='Conv1_1x1')
                br4 = layers.conv2d(br4, br4_num + 1*br4_inc, [3, 3], padding='SAME', scope='Conv2_3x3')
                br4 = layers.conv2d(br4, br4_num + 2*br4_inc, [3, 3], stride=2, scope='Conv3_3x3/2')
            net = tf.concat(3, [br1, br2, br3, br4], name='Concat1')
            # 8 x 8 x 1792 v1, 2144 v2 (paper indicates 2048 but only get this if we use a v1 config for this block)
            endpoints[scope] = net
            print('%s output shape: %s' % (scope, net.get_shape()))
    return net
def discriminator(inputs, reuse=False):
    with tf.variable_scope('discriminator'):
        if reuse:
            tf.get_variable_scope().reuse_variables()
        net = lays.conv2d_transpose(inputs, 64, 3, stride=1, scope='conv1', padding='SAME',
                                    activation_fn=leaky_relu)
        net = lays.max_pool2d(net, 2, 2, 'SAME', scope='max1')
        net = lays.conv2d_transpose(net, 128, 3, stride=1, scope='conv2', padding='SAME',
                                    activation_fn=leaky_relu)
        net = lays.max_pool2d(net, 2, 2, 'SAME', scope='max2')
        net = lays.conv2d_transpose(net, 256, 3, stride=1, scope='conv3', padding='SAME',
                                    activation_fn=leaky_relu)
        net = lays.max_pool2d(net, 2, 2, 'SAME', scope='max3')
        net = tf.reshape(net, (batch_size, 4 * 4 * 256))
        net = lays.fully_connected(net, 128, scope='fc1', activation_fn=leaky_relu)
        net = lays.dropout(net, 0.5)
        net = lays.fully_connected(net, 1, scope='fc2', activation_fn=None)
    return net
def _block_stem(net, endpoints, scope='Stem'):
    # Stem shared by inception-v4 and inception-resnet-v2 (resnet-v1 uses simpler _stem below)
    # NOTE observe endpoints of first 3 layers
    with arg_scope([layers.conv2d, layers.max_pool2d, layers.avg_pool2d], padding='VALID'):
        with tf.variable_scope(scope):
            # 299 x 299 x 3
            net = layers.conv2d(net, 32, [3, 3], stride=2, scope='Conv1_3x3/2')
            endpoints[scope + '/Conv1'] = net
            # 149 x 149 x 32
            net = layers.conv2d(net, 32, [3, 3], scope='Conv2_3x3')
            endpoints[scope + '/Conv2'] = net
            # 147 x 147 x 32
            net = layers.conv2d(net, 64, [3, 3], padding='SAME', scope='Conv3_3x3')
            endpoints[scope + '/Conv3'] = net
            # 147 x 147 x 64
            with tf.variable_scope('Br1A_Pool'):
                br1a = layers.max_pool2d(net, [3, 3], stride=2, scope='Pool1_3x3/2')
            with tf.variable_scope('Br1B_3x3'):
                br1b = layers.conv2d(net, 96, [3, 3], stride=2, scope='Conv4_3x3/2')
            net = tf.concat(3, [br1a, br1b], name='Concat1')
            endpoints[scope + '/Concat1'] = net
            # 73 x 73 x 160
            with tf.variable_scope('Br2A_3x3'):
                br2a = layers.conv2d(net, 64, [1, 1], padding='SAME', scope='Conv5_1x1')
                br2a = layers.conv2d(br2a, 96, [3, 3], scope='Conv6_3x3')
            with tf.variable_scope('Br2B_7x7x3'):
                br2b = layers.conv2d(net, 64, [1, 1], padding='SAME', scope='Conv5_1x1')
                br2b = layers.conv2d(br2b, 64, [7, 1], padding='SAME', scope='Conv6_7x1')
                br2b = layers.conv2d(br2b, 64, [1, 7], padding='SAME', scope='Conv7_1x7')
                br2b = layers.conv2d(br2b, 96, [3, 3], scope='Conv8_3x3')
            net = tf.concat(3, [br2a, br2b], name='Concat2')
            endpoints[scope + '/Concat2'] = net
            # 71 x 71 x 192
            with tf.variable_scope('Br3A_3x3'):
                br3a = layers.conv2d(net, 192, [3, 3], stride=2, scope='Conv9_3x3/2')
            with tf.variable_scope('Br3B_Pool'):
                br3b = layers.max_pool2d(net, [3, 3], stride=2, scope='Pool2_3x3/2')
            net = tf.concat(3, [br3a, br3b], name='Concat3')
            endpoints[scope + '/Concat3'] = net
            print('%s output shape: %s' % (scope, net.get_shape()))
            # 35 x 35 x 384
    return net