The following 13 code examples, extracted from open-source Python projects, illustrate how to use chainer.functions.MaxPooling2D().
def __call__(self, x):
    """Forward pass: four conv blocks, spatial pyramid pooling, FC head.

    Args:
        x: input image batch (4D array/Variable).

    Returns:
        The raw output of ``fc6`` (no activation applied).
    """
    feat = F.relu(self.conv1_1(x))
    feat = F.relu(self.conv1_2(feat))
    feat = F.max_pooling_2d(feat, 2, stride=2)
    feat = F.relu(self.conv2_1(feat))
    feat = F.relu(self.conv2_2(feat))
    feat = F.max_pooling_2d(feat, 2, stride=2)
    feat = F.relu(self.conv3_1(feat))
    feat = F.relu(self.conv3_2(feat))
    feat = F.max_pooling_2d(feat, 2, stride=2)
    feat = F.relu(self.conv4_1(feat))
    feat = F.relu(self.conv4_2(feat))
    # Spatial pyramid pooling (3 levels, max pooling) produces a
    # fixed-length feature vector regardless of spatial resolution.
    feat = F.spatial_pyramid_pooling_2d(feat, 3, F.MaxPooling2D)
    feat = F.tanh(self.fc4(feat))
    feat = F.dropout(feat, ratio=.5, train=self.train)
    feat = F.tanh(self.fc5(feat))
    feat = F.dropout(feat, ratio=.5, train=self.train)
    return self.fc6(feat)
def __init__(self):
    """Build the VGG-16 convolution/FC layers and group the convolutions
    into per-resolution blocks, each paired with its own pooling instance.
    """
    super(VGG, self).__init__()
    with self.init_scope():
        # Block 1: 3 -> 64 channels, two 3x3 convs.
        self.conv1_1 = L.Convolution2D(3, 64, 3, stride=1, pad=1)
        self.conv1_2 = L.Convolution2D(64, 64, 3, stride=1, pad=1)
        # Block 2: 64 -> 128 channels.
        self.conv2_1 = L.Convolution2D(64, 128, 3, stride=1, pad=1)
        self.conv2_2 = L.Convolution2D(128, 128, 3, stride=1, pad=1)
        # Block 3: 128 -> 256 channels, three convs.
        self.conv3_1 = L.Convolution2D(128, 256, 3, stride=1, pad=1)
        self.conv3_2 = L.Convolution2D(256, 256, 3, stride=1, pad=1)
        self.conv3_3 = L.Convolution2D(256, 256, 3, stride=1, pad=1)
        # Block 4: 256 -> 512 channels.
        self.conv4_1 = L.Convolution2D(256, 512, 3, stride=1, pad=1)
        self.conv4_2 = L.Convolution2D(512, 512, 3, stride=1, pad=1)
        self.conv4_3 = L.Convolution2D(512, 512, 3, stride=1, pad=1)
        # Block 5: 512 channels throughout.
        self.conv5_1 = L.Convolution2D(512, 512, 3, stride=1, pad=1)
        self.conv5_2 = L.Convolution2D(512, 512, 3, stride=1, pad=1)
        self.conv5_3 = L.Convolution2D(512, 512, 3, stride=1, pad=1)
        # Fully connected classifier head (25088 = 512 * 7 * 7).
        self.fc6 = L.Linear(25088, 4096)
        self.fc7 = L.Linear(4096, 4096)
        self.fc8 = L.Linear(4096, 1000)
    # Keep track of the pooling indices inside each function instance
    self.conv_blocks = [
        [self.conv1_1, self.conv1_2],
        [self.conv2_1, self.conv2_2],
        [self.conv3_1, self.conv3_2, self.conv3_3],
        [self.conv4_1, self.conv4_2, self.conv4_3],
        [self.conv5_1, self.conv5_2, self.conv5_3]
    ]
    # Filled in elsewhere with the mirrored decoder blocks — TODO confirm.
    self.deconv_blocks = []
    # One MaxPooling2D instance per conv block; presumably each instance
    # holds its own pooling state (see comment above) — verify against
    # the Chainer version in use.
    self.mps = [F.MaxPooling2D(2, 2) for _ in self.conv_blocks]
def __init__(self, train=False):
    """Define the VGG-16 trunk as an ordered (name, callable) list.

    Entries whose name starts with ``'_'`` are parameterless operations
    (ReLU, max pooling) and are not registered as links; all other
    entries are trainable links registered via ``add_link``.
    NOTE(review): ``train`` is accepted but not used in this method.
    """
    super(VGG16, self).__init__()
    self.trunk = [
        ('conv1_1', L.Convolution2D(3, 64, 3, 1, 1)),
        ('_relu1_1', F.ReLU()),
        ('conv1_2', L.Convolution2D(64, 64, 3, 1, 1)),
        ('_relu1_2', F.ReLU()),
        ('_pool1', F.MaxPooling2D(2, 2)),
        ('conv2_1', L.Convolution2D(64, 128, 3, 1, 1)),
        ('_relu2_1', F.ReLU()),
        ('conv2_2', L.Convolution2D(128, 128, 3, 1, 1)),
        ('_relu2_2', F.ReLU()),
        ('_pool2', F.MaxPooling2D(2, 2)),
        ('conv3_1', L.Convolution2D(128, 256, 3, 1, 1)),
        ('_relu3_1', F.ReLU()),
        ('conv3_2', L.Convolution2D(256, 256, 3, 1, 1)),
        ('_relu3_2', F.ReLU()),
        ('conv3_3', L.Convolution2D(256, 256, 3, 1, 1)),
        ('_relu3_3', F.ReLU()),
        ('_pool3', F.MaxPooling2D(2, 2)),
        ('conv4_1', L.Convolution2D(256, 512, 3, 1, 1)),
        ('_relu4_1', F.ReLU()),
        ('conv4_2', L.Convolution2D(512, 512, 3, 1, 1)),
        ('_relu4_2', F.ReLU()),
        ('conv4_3', L.Convolution2D(512, 512, 3, 1, 1)),
        ('_relu4_3', F.ReLU()),
        ('_pool4', F.MaxPooling2D(2, 2)),
        ('conv5_1', L.Convolution2D(512, 512, 3, 1, 1)),
        ('_relu5_1', F.ReLU()),
        ('conv5_2', L.Convolution2D(512, 512, 3, 1, 1)),
        ('_relu5_2', F.ReLU()),
        ('conv5_3', L.Convolution2D(512, 512, 3, 1, 1)),
        ('_relu5_3', F.ReLU()),
    ]
    # Register only the trainable layers as links.
    for name, link in self.trunk:
        if not name.startswith('_'):
            self.add_link(name, link)
def __init__(self, in_channel, n_mid=64):
    """Encoder/decoder pair: 7x7 conv + BN on each side, shared pooling.

    Args:
        in_channel: number of input channels for the encoder conv.
        n_mid: channel width of both convolutions (default 64).
    """
    # Weight scale passed to both convolutions (He-style sqrt(2)).
    w = math.sqrt(2)
    super(EncDec, self).__init__(
        enc=L.Convolution2D(in_channel, n_mid, 7, 1, 3, w),
        bn_m=L.BatchNormalization(n_mid),
        dec=L.Convolution2D(n_mid, n_mid, 7, 1, 3, w),
        bn_o=L.BatchNormalization(n_mid),
    )
    # Persistent 2x2/stride-2 pooling instance; cuDNN disabled —
    # presumably so the pooling indexes stay accessible for unpooling in
    # the decoder; confirm against the Chainer version in use.
    self.p = F.MaxPooling2D(2, 2, use_cudnn=False)
    # Cached intermediate from the forward pass — set elsewhere.
    self.inside = None
def setUp(self):
    """Prepare a random input, pool it once, and build an upstream grad.

    ``self.p`` is kept so the test can later reuse the same pooling
    instance (and whatever state its forward pass produced).
    """
    self.x = numpy.random.uniform(
        -1, 1, self.in_shape).astype(numpy.float32)
    self.p = F.MaxPooling2D(2, 2, use_cudnn=False)
    self.pooled_y = self.p(self.x)
    self.gy = numpy.random.uniform(
        -1, 1, self.in_shape).astype(numpy.float32)
def check_invalid_dtype(self):
    """Run SPP on ``self.v`` (a variable of an invalid dtype).

    The call itself is the assertion target — the surrounding test
    expects it to raise for unsupported dtypes.
    """
    pooling_class = functions.MaxPooling2D
    functions.spatial_pyramid_pooling_2d(self.v, 3, pooling_class)
def forward(self):
    """Wrap the raw input array and apply 3-level spatial pyramid max
    pooling, honouring the test's cuDNN setting."""
    v = chainer.Variable(self.x)
    return functions.spatial_pyramid_pooling_2d(
        v, 3, functions.MaxPooling2D, use_cudnn=self.use_cudnn)
def check_backward(self, x_data, y_grad, use_cudnn=True):
    """Numerically verify MaxPooling2D gradients.

    Uses a 3x3 kernel with stride 2 and pad 1; ``cover_all`` and the
    tolerance options come from the test case instance.
    """
    pooling = functions.MaxPooling2D(
        3, stride=2, pad=1, cover_all=self.cover_all,
        use_cudnn=use_cudnn)
    gradient_check.check_backward(
        pooling, x_data, y_grad, **self.check_backward_options)
def __call__(self, x):
    """Compute an image-wise score from a batch of images

    Args:
        x (chainer.Variable): A variable with 4D image array.

    Returns:
        chainer.Variable: An image-wise score. Its channel size is
        :obj:`self.n_class`.
    """
    # One pooling instance per encoder stage; each is later handed to
    # ``_upsampling_2d`` for the matching decoder stage — presumably to
    # reuse the recorded max locations (SegNet-style unpooling); confirm
    # against ``_pool_without_cudnn`` / ``_upsampling_2d``.
    p1 = F.MaxPooling2D(2, 2)
    p2 = F.MaxPooling2D(2, 2)
    p3 = F.MaxPooling2D(2, 2)
    p4 = F.MaxPooling2D(2, 2)
    # Local response normalization on the raw input.
    h = F.local_response_normalization(x, 5, 1, 1e-4 / 5., 0.75)
    # Encoder: conv -> BN -> ReLU, then pool without cuDNN.
    h = _pool_without_cudnn(p1, F.relu(self.conv1_bn(self.conv1(h))))
    h = _pool_without_cudnn(p2, F.relu(self.conv2_bn(self.conv2(h))))
    h = _pool_without_cudnn(p3, F.relu(self.conv3_bn(self.conv3(h))))
    h = _pool_without_cudnn(p4, F.relu(self.conv4_bn(self.conv4(h))))
    # Decoder: upsample with the pooling instances in reverse order
    # (p4 first, p1 last), each followed by conv -> BN.
    h = self._upsampling_2d(h, p4)
    h = self.conv_decode4_bn(self.conv_decode4(h))
    h = self._upsampling_2d(h, p3)
    h = self.conv_decode3_bn(self.conv_decode3(h))
    h = self._upsampling_2d(h, p2)
    h = self.conv_decode2_bn(self.conv_decode2(h))
    h = self._upsampling_2d(h, p1)
    h = self.conv_decode1_bn(self.conv_decode1(h))
    # Per-pixel classification scores.
    score = self.conv_classifier(h)
    return score
def __init__(self, train=False):
    """Define a VGG-16 trunk extended with a 3x3 RPN convolution.

    The trunk is an ordered (name, callable) list; only entries whose
    name contains ``'conv'`` (including ``rpn_conv_3x3``) are registered
    as links — ReLU and pooling entries carry no parameters.
    NOTE(review): ``train`` is accepted but not used in this method.
    """
    super(VGG16, self).__init__()
    self.trunk = [
        ('conv1_1', L.Convolution2D(3, 64, 3, 1, 1)),
        ('relu1_1', F.ReLU()),
        ('conv1_2', L.Convolution2D(64, 64, 3, 1, 1)),
        ('relu1_2', F.ReLU()),
        ('pool1', F.MaxPooling2D(2, 2)),
        ('conv2_1', L.Convolution2D(64, 128, 3, 1, 1)),
        ('relu2_1', F.ReLU()),
        ('conv2_2', L.Convolution2D(128, 128, 3, 1, 1)),
        ('relu2_2', F.ReLU()),
        ('pool2', F.MaxPooling2D(2, 2)),
        ('conv3_1', L.Convolution2D(128, 256, 3, 1, 1)),
        ('relu3_1', F.ReLU()),
        ('conv3_2', L.Convolution2D(256, 256, 3, 1, 1)),
        ('relu3_2', F.ReLU()),
        ('conv3_3', L.Convolution2D(256, 256, 3, 1, 1)),
        ('relu3_3', F.ReLU()),
        ('pool3', F.MaxPooling2D(2, 2)),
        ('conv4_1', L.Convolution2D(256, 512, 3, 1, 1)),
        ('relu4_1', F.ReLU()),
        ('conv4_2', L.Convolution2D(512, 512, 3, 1, 1)),
        ('relu4_2', F.ReLU()),
        ('conv4_3', L.Convolution2D(512, 512, 3, 1, 1)),
        ('relu4_3', F.ReLU()),
        ('pool4', F.MaxPooling2D(2, 2)),
        ('conv5_1', L.Convolution2D(512, 512, 3, 1, 1)),
        ('relu5_1', F.ReLU()),
        ('conv5_2', L.Convolution2D(512, 512, 3, 1, 1)),
        ('relu5_2', F.ReLU()),
        ('conv5_3', L.Convolution2D(512, 512, 3, 1, 1)),
        ('relu5_3', F.ReLU()),
        ('rpn_conv_3x3', L.Convolution2D(512, 512, 3, 1, 1)),
        ('rpn_relu_3x3', F.ReLU()),
    ]
    # Register the trainable layers; the name test keys on 'conv'.
    for name, link in self.trunk:
        if 'conv' in name:
            self.add_link(name, link)
def __call__(self, x):
    """Residual trunk followed by spatial pyramid pooling and a conv head.

    Returns logits reshaped to ``(-1, self.num_class)``.
    """
    out = F.elu(self.conv1(x))
    out = F.max_pooling_2d(out, 3, stride=2)
    # Residual stages in order; each receives the training flag.
    for res_block in (self.res2, self.res3, self.res4, self.res5):
        out = res_block(out, self.train)
    # Fixed-length descriptor via 3-level spatial pyramid max pooling.
    out = F.spatial_pyramid_pooling_2d(out, 3, F.MaxPooling2D)
    out = F.elu(self.conv2(out))
    out = F.dropout(out, ratio=0.5)
    out = self.conv3(out)
    return F.reshape(out, (-1, self.num_class))
def _spatial_pyramid_pooling_2d(x):
    """Apply 4-level spatial pyramid max pooling to feature map *x*."""
    pyramid_height = 4
    return F.spatial_pyramid_pooling_2d(x, pyramid_height, F.MaxPooling2D)