The following 50 code examples, extracted from open-source Python projects, show how to use chainer.functions.average_pooling_2d().
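Before the project snippets, here is a minimal, self-contained sketch (dummy shapes; the positional signature average_pooling_2d(x, ksize, stride, pad) of recent Chainer versions is assumed) showing an ordinary 2x2 pooling call and the global-average-pooling idiom that many of the examples below rely on:

import numpy as np
import chainer.functions as F

# Dummy NCHW input: batch of 1, 3 channels, 32x32 spatial size.
x = np.random.rand(1, 3, 32, 32).astype(np.float32)

# 2x2 average pooling with stride 2 halves the spatial size.
y = F.average_pooling_2d(x, ksize=2, stride=2)
print(y.shape)  # (1, 3, 16, 16)

# Global average pooling: use the full spatial extent as the kernel,
# as several of the classification networks below do.
g = F.average_pooling_2d(x, ksize=x.shape[2:])
print(g.shape)  # (1, 3, 1, 1)
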
def __call__(self, x, t, train=True, finetune=False):
    # First conv layer
    h = self[0](x)
    # Residual blocks
    for i in range(1, len(self) - 2):
        h = self[i](h, train, finetune)
    # BN, relu, pool, final layer
    h = self[-2](h)
    h = F.relu(h)
    n, nc, ns, nx, ny = h.data.shape
    h = F.reshape(h, (n, nc * ns, nx, ny))
    h = F.average_pooling_2d(h, ksize=h.data.shape[2:])
    h = self[-1](h)
    h = F.reshape(h, h.data.shape[:2])
    return F.softmax_cross_entropy(h, t), F.accuracy(h, t)

def __call__(self, x, t, train=True, finetune=False):
    h = x
    # First conv layer
    h = self[0](h)
    # Residual blocks
    for i in range(1, len(self) - 2):
        h = self[i](h, train, finetune)
    # BN, relu, pool, final layer
    h = self[-2](h)
    h = F.relu(h)
    h = F.average_pooling_2d(h, ksize=h.data.shape[2:])
    h = self[-1](h)
    h = F.reshape(h, h.data.shape[:2])
    return F.softmax_cross_entropy(h, t), F.accuracy(h, t)

def __call__(self, x, train=True):
    h = self.conv1(x, train)
    h = self.conv2(h, train)
    h = self.conv3(h, train)
    h = F.max_pooling_2d(h, ksize=(3, 3), stride=(2, 2), pad=(1, 1))
    h = self.conv4(h, train)
    h = self.conv5(h, train)
    h = self.conv6(h, train)
    h = self.inception_f5_1(h, train)
    h = self.inception_f5_2(h, train)
    h = self.inception_f5_3(h, train)
    h = self.inception_f6_1(h, train)
    h = self.inception_f6_2(h, train)
    h = self.inception_f6_3(h, train)
    h = self.inception_f6_4(h, train)
    h = self.inception_f6_5(h, train)
    h = self.inception_f7_1(h, train)
    h = self.inception_f7_2(h, train)
    num, categories, y, x = h.data.shape
    # global average pooling
    h = F.reshape(F.average_pooling_2d(h, (y, x)), (num, categories))
    h = F.dropout(h, ratio=0.2, train=train)
    h = self.linear(h)
    return h

def _setup_pooling(self, layer):
    param = layer.pooling_param
    ksize = _get_ksize(param)
    stride = _get_stride(param)
    pad = _get_pad(param)
    if param.pool == param.MAX:
        func = functions.max_pooling_2d
    elif param.pool == param.AVE:
        func = functions.average_pooling_2d
    else:
        raise RuntimeError('Stochastic pooling is not supported')
    fw = _SingleArgumentFunction(func, ksize, stride=stride, pad=pad)
    self.forwards[layer.name] = fw
    self._add_layer(layer)

def check_forward(self, x_data, use_cudnn=True):
    x = chainer.Variable(x_data)
    y = functions.average_pooling_2d(x, 3, stride=2, pad=1,
                                     use_cudnn=use_cudnn)
    self.assertEqual(y.data.dtype, self.dtype)
    y_data = cuda.to_cpu(y.data)
    self.assertEqual(self.gy.shape, y_data.shape)
    for k in six.moves.range(2):
        for c in six.moves.range(3):
            x = self.x[k, c]
            expect = numpy.array([
                [x[0:2, 0:2].sum(), x[0:2, 1:3].sum()],
                [x[1:4, 0:2].sum(), x[1:4, 1:3].sum()]]) / 9
            gradient_check.assert_allclose(
                expect, y_data[k, c], **self.check_forward_options)

def __call__(self, pmap, fmap, cmap):
    fmap = self.conv0(fmap)
    fmap = F.relu(fmap)
    cmap = F.average_pooling_2d(cmap, ksize=8, stride=8)
    h = F.concat((fmap, pmap, cmap), 1)
    h = self.conv1(h)
    h = F.relu(h)
    h = self.conv2(h)
    h = F.relu(h)
    h = self.conv3(h)
    h = F.relu(h)
    h = self.conv4(h)
    h = F.relu(h)
    h = self.conv5(h)
    return h

def __call__(self, x):
    h = F.relu(self.conv1_1(x))
    h = F.relu(self.conv1_2(h))
    h = F.average_pooling_2d(h, 2, 2)
    h = F.relu(self.conv2_1(h))
    h = F.relu(self.conv2_2(h))
    h = F.average_pooling_2d(h, 2, 2)
    h = F.relu(self.conv3_1(h))
    h = F.relu(self.conv3_2(h))
    h = F.relu(self.conv3_3(h))
    h = F.relu(self.conv3_4(h))
    h = F.average_pooling_2d(h, 2, 2)
    h = F.relu(self.fc4(h))
    h = F.relu(self.fc5(h))
    h = self.fc6(h)
    L_out = h
    return L_out

def __call__(self, x, train=True):
    h = self.conv1(x, train)
    h = F.max_pooling_2d(h, ksize=(3, 3), stride=(2, 2), pad=(1, 1))
    h = self.conv2_1x1(h, train)
    h = self.conv2_3x3(h, train)
    h = F.max_pooling_2d(h, ksize=(3, 3), stride=(2, 2), pad=(1, 1))
    h = self.inception3a(h, train)
    h = self.inception3b(h, train)
    h = self.inception3c(h, train)
    h = self.inception4a(h, train)
    h = self.inception4b(h, train)
    h = self.inception4c(h, train)
    h = self.inception4d(h, train)
    h = self.inception4e(h, train)
    h = self.inception5a(h, train)
    h = self.inception5b(h, train)
    num, categories, y, x = h.data.shape
    # global average pooling
    h = F.reshape(F.average_pooling_2d(h, (y, x)), (num, categories))
    h = self.linear(h)
    return h

def __call__(self, x_0: chainer.Variable, x_1: chainer.Variable) -> typing.List[chainer.Variable]:
    hs = []
    h = self.c0_0(x_0)
    if self.will_concat:
        h = F.concat([h, self.c0_1(x_1)])
    h = self.c1(h)
    hs.append(self.out_1(chainer.functions.average_pooling_2d(h, (h.shape[2], h.shape[3]))))
    # hs.append(chainer.functions.average_pooling_2d
    h = self.c2(h)
    hs.append(self.out_2(chainer.functions.average_pooling_2d(h, (h.shape[2], h.shape[3]))))
    h = self.c3(h)
    h = self.c4(h)
    hs.append(h)
    return hs

def __call__(self, x, train=False):
    h = F.relu(self.conv1(x))
    h = F.max_pooling_2d(h, 3, stride=2)
    h = self.fire2(h)
    h = self.fire3(h)
    h = self.fire4(h)
    h = F.max_pooling_2d(h, 3, stride=2)
    h = self.fire5(h)
    h = self.fire6(h)
    h = self.fire7(h)
    h = self.fire8(h)
    h = F.max_pooling_2d(h, 3, stride=2)
    h = self.fire9(h)
    h = F.dropout(h, ratio=0.5, train=train)
    h = F.relu(self.conv10(h))
    h = F.average_pooling_2d(h, 13)
    return F.reshape(h, (-1, 1000))

def __call__(self, x):
    conv1_1 = F.relu(self.vgg.conv1_1(x))
    conv1_2 = F.relu(self.vgg.conv1_2(conv1_1))
    pool1 = F.average_pooling_2d(conv1_2, 2, stride=2)
    conv2_1 = F.relu(self.vgg.conv2_1(pool1))
    conv2_2 = F.relu(self.vgg.conv2_2(conv2_1))
    pool2 = F.average_pooling_2d(conv2_2, 2, stride=2)
    conv3_1 = F.relu(self.vgg.conv3_1(pool2))
    conv3_2 = F.relu(self.vgg.conv3_2(conv3_1))
    conv3_3 = F.relu(self.vgg.conv3_3(conv3_2))
    conv3_4 = F.relu(self.vgg.conv3_4(conv3_3))
    pool3 = F.average_pooling_2d(conv3_4, 2, stride=2)
    conv4_1 = F.relu(self.vgg.conv4_1(pool3))
    conv4_2 = F.relu(self.vgg.conv4_2(conv4_1))
    conv4_3 = F.relu(self.vgg.conv4_3(conv4_2))
    conv4_4 = F.relu(self.vgg.conv4_4(conv4_3))
    pool4 = F.average_pooling_2d(conv4_4, 2, stride=2)
    conv5_1 = F.relu(self.vgg.conv5_1(pool4))
    return tuple([conv1_1, conv2_1, conv3_1, conv4_1, conv5_1, conv4_2])

def __call__(self, x, t):
    self.clear()
    h = self.bn1(self.conv1(x), test=not self.train)
    h = F.max_pooling_2d(F.relu(h), 3, stride=2)
    h = self.res2(h, self.train)
    h = self.res3(h, self.train)
    h = self.res4(h, self.train)
    h = self.res5(h, self.train)
    h = F.average_pooling_2d(h, 7, stride=1)
    if t == "feature":
        return h
    h = self.fc(h)
    if self.train:
        self.loss = F.softmax_cross_entropy(h, t)
        self.accuracy = F.accuracy(h, t)
        return self.loss
    else:
        return h

def __call__(self, x, t, predict=False):
    h = self.bn1(self.conv1(x), test=not self.train)
    h = F.max_pooling_2d(F.relu(h), 2, stride=2)
    h = self.bn2(self.conv2(h), test=not self.train)
    h = F.max_pooling_2d(F.relu(h), 2, stride=2)
    h = F.dropout(F.relu(self.conv3(h)), ratio=0.6, train=self.train)
    h = F.max_pooling_2d(F.relu(self.conv4(h)), 2, stride=2)
    h = F.average_pooling_2d(F.relu(self.conv5(h)), 3, stride=1)
    h = F.dropout(F.relu(self.fc6(h)), ratio=0.6, train=self.train)
    h = self.fc7(h)
    self.loss = F.softmax_cross_entropy(h, t)
    self.accuracy = F.accuracy(h, t)
    if predict:
        return h
    else:
        return self.loss

def reduct(self, x):
    h = F.relu(self.conv1_1(x))
    h = F.relu(self.bn1(self.conv1_2(h)))
    # 100 -> 50
    h = F.max_pooling_2d(h, 2, stride=2)
    h = F.relu(self.conv2_1(h))
    h = F.relu(self.bn2(self.conv2_2(h)))
    # 50 -> 25
    h = F.max_pooling_2d(h, 2, stride=2)
    h = F.relu(self.conv3_1(h))
    h = F.relu(self.bn3(self.conv3_2(h)))
    # 25 -> (25 + 1 * 2 - 3) / 3 + 1 = 9
    h = F.max_pooling_2d(h, 3, stride=3, pad=1)
    h = F.relu(self.conv4_1(h))
    h = F.relu(self.bn4(self.conv4_2(h)))
    # 9 -> 1
    h = F.average_pooling_2d(h, 9, stride=1)
    return h

def reduct(self, x):
    h = F.relu(self.conv1_1(x))
    h = F.relu(self.conv1_2(h))
    h = self.bn1(h)
    # 100 -> 50
    h = F.max_pooling_2d(h, 2, stride=2)
    h = F.relu(self.conv2(h))
    h = self.bn2(h)
    # 50 -> 25
    h = F.max_pooling_2d(h, 2, stride=2)
    h = F.relu(self.conv3(h))
    h = self.bn3(h)
    # 25 -> (25 + 1 * 2 - 3) / 3 + 1 = 9
    h = F.max_pooling_2d(h, 3, stride=3, pad=1)
    h = F.relu(self.conv4(h))
    h = self.bn4(h)
    # 9 -> 1
    h = F.average_pooling_2d(h, 9, stride=1)
    return h

def __call__(self, x):
    h = self.st(x)
    h = F.average_pooling_2d(h, 2, 2)  # For TC and RTS datasets
    h = F.relu(self.conv1(h))
    h = F.max_pooling_2d(h, 2, 2)
    h = F.relu(self.conv2(h))
    h = F.max_pooling_2d(h, 2, 2)
    h = self.fc(h)
    return h

def __call__(self, x):
    h = F.leaky_relu(self.bias1(self.bn1(self.conv1(x), finetune=self.finetune)), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.dropout(h, 0.25)
    h = F.leaky_relu(self.bias2(self.bn2(self.conv2(h), finetune=self.finetune)), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.dropout(h, 0.25)
    h = F.leaky_relu(self.bias3(self.bn3(self.conv3(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias4(self.bn4(self.conv4(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias5(self.bn5(self.conv5(h), finetune=self.finetune)), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.dropout(h, 0.25)
    h = F.leaky_relu(self.bias6(self.bn6(self.conv6(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias7(self.bn7(self.conv7(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias8(self.bn8(self.conv8(h), finetune=self.finetune)), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.dropout(h, 0.25)
    h = F.leaky_relu(self.bias9(self.bn9(self.conv9(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias10(self.bn10(self.conv10(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias11(self.bn11(self.conv11(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias12(self.bn12(self.conv12(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias13(self.bn13(self.conv13(h), finetune=self.finetune)), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.dropout(h, 0.25)
    h = F.leaky_relu(self.bias14(self.bn14(self.conv14(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias15(self.bn15(self.conv15(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias16(self.bn16(self.conv16(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias17(self.bn17(self.conv17(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias18(self.bn18(self.conv18(h), finetune=self.finetune)), slope=0.1)
    h = F.average_pooling_2d(h, h.shape[-2:])
    h = self.fc19(h)
    return h

def __call__(self, x):
    h = F.leaky_relu(self.bias1(self.bn1(self.conv1(x), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias2(self.bn2(self.conv2(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias3(self.bn3(self.conv3(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias4(self.bn4(self.conv4(h), finetune=self.finetune)), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.leaky_relu(self.bias5(self.bn5(self.conv5(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias6(self.bn6(self.conv6(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias7(self.bn7(self.conv7(h), finetune=self.finetune)), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.leaky_relu(self.bias8(self.bn8(self.conv8(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias9(self.bn9(self.conv9(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias10(self.bn10(self.conv10(h), finetune=self.finetune)), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.leaky_relu(self.bias11(self.bn11(self.conv11(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias12(self.bn12(self.conv12(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias13(self.bn13(self.conv13(h), finetune=self.finetune)), slope=0.1)
    h = F.max_pooling_2d(h, ksize=2, stride=2, pad=0)
    h = F.leaky_relu(self.bias14(self.bn14(self.conv14(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias15(self.bn15(self.conv15(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias16(self.bn16(self.conv16(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias17(self.bn17(self.conv17(h), finetune=self.finetune)), slope=0.1)
    h = F.leaky_relu(self.bias18(self.bn18(self.conv18(h), finetune=self.finetune)), slope=0.1)
    h = F.average_pooling_2d(h, h.shape[-2:])
    h = self.fc19(h)
    return h

def max_or_ave(word='ave'):
    if word == 'ave':
        return F.average_pooling_2d
    return F.max_pooling_2d

def __call__(self, x, test):
    h = F.relu(self.b1(self.c1(x), test=test))
    h = self.b2(self.c2(h), test=test)
    if x.data.shape != h.data.shape:
        # Zero-pad the shortcut's channels and downsample it so that it
        # matches the residual branch before the addition.
        xp = chainer.cuda.get_array_module(x.data)
        n, c, hh, ww = x.data.shape
        pad_c = h.data.shape[1] - c
        p = xp.zeros((n, pad_c, hh, ww), dtype=xp.float32)
        p = chainer.Variable(p, volatile=test)
        x = F.concat((p, x))
        if x.data.shape[2:] != h.data.shape[2:]:
            x = F.average_pooling_2d(x, 1, 2)
    return h + x

def _global_average_pooling_2d(x):
    n_rois, n_channel, H, W = x.array.shape
    h = F.average_pooling_2d(x, (H, W), stride=1)
    h = F.reshape(h, (n_rois, n_channel))
    return h

def __call__(self, x, t, before_fc=False):
    self.clear()
    h = self.bn1(self.conv1(x), test=not self.train)
    h = F.max_pooling_2d(F.relu(h), 3, stride=2)
    h = self.res2(h, self.train)
    h = self.res3(h, self.train)
    h = self.res4(h, self.train)
    h = self.res5(h, self.train)
    h = F.average_pooling_2d(h, h.data.shape[2], stride=1)
    self.feature = h
    return h

def maybe_pooling(self, x):
    if 2 in self.strides:
        return F.average_pooling_2d(x, 1, 2, 0)
    return x

def __call__(self, x, train=False):
    h = self.conv_bn_relu(x, train)
    for i, n in enumerate(self.block_num):
        for ii in six.moves.range(n):
            h = self['resnext_block_{}_{}'.format(i + 1, ii + 1)](h, train)
    batch, channels, height, width = h.data.shape
    h = F.reshape(F.average_pooling_2d(h, (height, width)), (batch, channels, 1, 1))
    return F.reshape(self.linear(h, train), (batch, self.category_num))

def forward_one_step(self, x, test):
    f = activations[self.activation_function]
    chain = [x]

    # Hidden convolutional layers
    for i in range(self.n_hidden_layers):
        u = getattr(self, "layer_%i" % i)(chain[-1])
        if self.apply_batchnorm:
            if i == 0 and self.apply_batchnorm_to_input is False:
                pass
            else:
                u = getattr(self, "batchnorm_%i" % i)(u, test=test)
        chain.append(f(u))

    if self.projection_type == "fully_connection":
        u = self.projection_layer(chain[-1])
        if self.apply_batchnorm:
            u = self.projection_batchnorm(u, test=test)
        chain.append(f(u))
    elif self.projection_type == "global_average_pooling":
        batch_size = chain[-1].data.shape[0]
        n_maps = chain[-1].data[0].shape[0]
        chain.append(F.average_pooling_2d(chain[-1], self.top_filter_size))
        chain.append(F.reshape(chain[-1], (batch_size, n_maps)))
        u = self.projection_layer(chain[-1])
        if self.apply_batchnorm:
            u = self.projection_batchnorm(u, test=test)
        chain.append(f(u))
    else:
        raise NotImplementedError()

    return chain[-1]

def __call__(self, x):
    return functions.average_pooling_2d(x, self.ksize, self.stride, self.pad)

def __call__(self, x, t):
    self.clear()
    test = not self.train
    h = F.max_pooling_2d(
        F.relu(self.norm1(self.conv1(x), test=test)), 3, stride=2, pad=1)
    h = F.max_pooling_2d(
        F.relu(self.norm2(self.conv2(h), test=test)), 3, stride=2, pad=1)
    h = self.inc3a(h)
    h = self.inc3b(h)
    h = self.inc3c(h)
    h = self.inc4a(h)

    a = F.average_pooling_2d(h, 5, stride=3)
    a = F.relu(self.norma(self.conva(a), test=test))
    a = F.relu(self.norma2(self.lina(a), test=test))
    a = self.outa(a)
    self.loss1 = F.softmax_cross_entropy(a, t)

    h = self.inc4b(h)
    h = self.inc4c(h)
    h = self.inc4d(h)

    b = F.average_pooling_2d(h, 5, stride=3)
    b = F.relu(self.normb(self.convb(b), test=test))
    b = F.relu(self.normb2(self.linb(b), test=test))
    b = self.outb(b)
    self.loss2 = F.softmax_cross_entropy(b, t)

    h = self.inc4e(h)
    h = self.inc5a(h)
    h = F.average_pooling_2d(self.inc5b(h), 7)
    h = self.out(h)
    self.loss3 = F.softmax_cross_entropy(h, t)

    self.loss = 0.3 * (self.loss1 + self.loss2) + self.loss3
    self.accuracy = F.accuracy(h, t)
    return self.loss

def forward(self):
    x = chainer.Variable(self.x)
    return functions.average_pooling_2d(
        x, 3, stride=2, pad=1, use_cudnn=self.use_cudnn)

def __call__(self, x, subtract_mean=True):
    if subtract_mean:
        x = x - self._image_mean
#    h = super(ModifiedGoogLeNet, self).__call__(
#        x, layers=['pool5'], train=train)['pool5']
#    h = self.bn_fc(h, test=not train)
#    y = self.fc(h)
#    return y
    h = F.relu(self.conv1(x))
    h = F.max_pooling_2d(h, 3, stride=2)
    h = F.local_response_normalization(h, n=5, k=1, alpha=1e-4/5)
    h = F.relu(self.conv2_reduce(h))
    h = F.relu(self.conv2(h))
    h = F.local_response_normalization(h, n=5, k=1, alpha=1e-4/5)
    h = F.max_pooling_2d(h, 3, stride=2)
    h = self.inc3a(h)
    h = self.inc3b(h)
    h = F.max_pooling_2d(h, 3, stride=2)
    h = self.inc4a(h)
    h = self.inc4b(h)
    h = self.inc4c(h)
    h = self.inc4d(h)
    h = self.inc4e(h)
    h = F.max_pooling_2d(h, 3, stride=2)
    h = self.inc5a(h)
    h = self.inc5b(h)
    h = F.average_pooling_2d(h, 7, stride=1)
    h = self.bn_fc(h)
    y = self.fc(h)
    if self.normalize_output:
        y = F.normalize(y)
    return y

def __call__(self, x, train):
    h = bst.bst(self.b0(self.conv0(x)))
    h = bst.bst(self.b1(self.conv1(h)))
    h = bst.bst(self.b2(self.conv2(h)))
    h = F.max_pooling_2d(h, 2)
    h = F.average_pooling_2d(h, 32)
    h = self.b3(self.fc0(h))
    return h

def __call__(self, x, train):
    h = bst.bst(self.b0(self.conv0(x)))
    h = bst.bst(self.b1(self.conv1(h)))
    h = bst.bst(self.b2(self.conv2(h)))
    h = F.max_pooling_2d(h, 2)
    h = F.average_pooling_2d(h, 24)
    h = self.b3(self.fc0(h))
    return h

def __call__(self, x, train=False):
    h = self.conv1(x, train=train)
    for i in six.moves.range(len(self.strides)):
        for ii in six.moves.range(len(self.strides[i])):
            name = 'res_block{}_{}'.format(i, ii)
            h = self[name](h, train=train)
    batch, channels, height, width = h.data.shape
    h = F.reshape(F.average_pooling_2d(h, (height, width)), (batch, channels, 1, 1))
    return F.reshape(self.linear(h, train=train), (batch, self.category_num))

def __init__(self, ksize, stride=None, pad=0, use_cudnn=True):
    self._function = "average_pooling_2d"
    self.ksize = ksize
    self.stride = stride
    self.pad = pad
    self.use_cudnn = use_cudnn

def __call__(self, x):
    return F.average_pooling_2d(x, self.ksize, self.stride, self.pad, self.use_cudnn)

def __call__(self, x):
    skip = False
    if chainer.config.train and self.skip_ratio > 0 and np.random.rand() < self.skip_ratio:
        skip = True
    sh, sw = self.conv1.stride
    c_out, c_in, kh, kw = self.conv1.W.data.shape
    b, c, hh, ww = x.data.shape
    if sh == 1 and sw == 1:
        shape_out = (b, c_out, hh, ww)
    else:
        hh = (hh + 2 - kh) // sh + 1
        ww = (ww + 2 - kw) // sw + 1
        shape_out = (b, c_out, hh, ww)
    h = x
    if x.data.shape != shape_out:
        # Zero-pad channels and downsample the shortcut with 1x1 average
        # pooling so it matches the residual branch.
        xp = chainer.cuda.get_array_module(x.data)
        n, c, hh, ww = x.data.shape
        pad_c = shape_out[1] - c
        p = xp.zeros((n, pad_c, hh, ww), dtype=xp.float32)
        p = chainer.Variable(p)
        x = F.concat((p, x))
        if x.data.shape[2:] != shape_out[2:]:
            x = F.average_pooling_2d(x, 1, 2)
    if skip:
        return x
    h = self.bn1(self.conv1(h))
    if self.activation1 is not None:
        h = self.activation1(h)
    h = self.bn2(self.conv2(h))
    if not chainer.config.train:
        h = h * (1 - self.skip_ratio)
    if self.swapout:
        h = F.dropout(h) + F.dropout(x)
    else:
        h = h + x
    if self.activation2 is not None:
        return self.activation2(h)
    else:
        return h

def __call__(self, x):
    skip = False
    if chainer.config.train and self.skip_ratio > 0 and np.random.rand() < self.skip_ratio:
        skip = True
    sh, sw = self.conv1.stride
    c_out, c_in, kh, kw = self.conv1.W.data.shape
    b, c, hh, ww = x.data.shape
    if sh == 1 and sw == 1:
        shape_out = (b, c_out, hh, ww)
    else:
        hh = (hh + 2 - kh) // sh + 1
        ww = (ww + 2 - kw) // sw + 1
        shape_out = (b, c_out, hh, ww)
    h = x
    if x.data.shape != shape_out:
        # Zero-pad channels and downsample the shortcut with 1x1 average
        # pooling so it matches the residual branch.
        xp = chainer.cuda.get_array_module(x.data)
        n, c, hh, ww = x.data.shape
        pad_c = shape_out[1] - c
        p = xp.zeros((n, pad_c, hh, ww), dtype=xp.float32)
        p = chainer.Variable(p)
        x = F.concat((p, x))
        if x.data.shape[2:] != shape_out[2:]:
            x = F.average_pooling_2d(x, 1, 2)
    if skip:
        return x
    h = self.bn1(h)
    if self.activation1 is not None:
        h = self.activation1(h)
    h = self.conv1(h)
    h = self.bn2(h)
    if self.activation2 is not None:
        h = self.activation2(h)
    h = self.conv2(h)
    if not chainer.config.train:
        h = h * (1 - self.skip_ratio)
    if self.swapout:
        return F.dropout(h) + F.dropout(x)
    else:
        return h + x

def __call__(self, x):
    xp = chainer.cuda.get_array_module(x.data)
    skip = False
    if chainer.config.train and self.skip_ratio > 0 and np.random.rand() < self.skip_ratio:
        skip = True
    sh, sw = self.conv1.stride
    c_out, c_in, kh, kw = self.conv1.W.data.shape
    b, c, hh, ww = x.data.shape
    if sh == 1 and sw == 1:
        shape_out = (b, c_out, hh, ww)
    else:
        hh = (hh + 2 - kh) // sh + 1
        ww = (ww + 2 - kw) // sw + 1
        shape_out = (b, c_out, hh, ww)
    h = x
    if x.data.shape[2:] != shape_out[2:]:
        # Downsample the shortcut with 1x1 average pooling, then zero-pad
        # its channels so it matches the residual branch.
        x = F.average_pooling_2d(x, 1, 2)
    if x.data.shape[1] != c_out:
        n, c, hh, ww = x.data.shape
        pad_c = c_out - c
        p = xp.zeros((n, pad_c, hh, ww), dtype=xp.float32)
        p = chainer.Variable(p)
        x = F.concat((x, p), axis=1)
    if skip:
        return x
    h = self.bn1(h)
    h = self.conv1(h)
    h = self.bn2(h)
    if self.activation is not None:
        h = self.activation(h)
    h = self.conv2(h)
    h = self.bn3(h)
    if self.skip_ratio > 0 and not chainer.config.train:
        h = h * (1 - self.skip_ratio)
    return h + x

def __call__(self, x):
    h = self.bconv1_1(x)
    h = self.bconv1_2(h)
    h = F.dropout(F.max_pooling_2d(h, 2), 0.25)
    h = self.bconv2_1(h)
    h = self.bconv2_2(h)
    h = F.dropout(F.max_pooling_2d(h, 2), 0.25)
    h = self.bconv3_1(h)
    h = self.bconv3_2(h)
    h = self.bconv3_3(h)
    h = self.bconv3_4(h)
    h = F.dropout(F.max_pooling_2d(h, 2), 0.25)
    h = F.average_pooling_2d(h, 4, 1, 0)
    h = self.fc(F.dropout(h))
    return h

def __call__(self, x):
    h = self.bconv1_1(x)
    h = F.dropout(h, 0.25)
    h = self.bconv1_2(h)
    h = F.dropout(h, 0.25)
    h = self.bconv1_3(h)
    h = F.dropout(h, 0.25)
    h = self.bconv1_4(h)
    h = F.dropout(F.max_pooling_2d(h, 2), 0.25)
    h = self.bconv2_1(h)
    h = F.dropout(h, 0.25)
    h = self.bconv2_2(h)
    h = F.dropout(h, 0.25)
    h = self.bconv2_3(h)
    h = F.dropout(h, 0.25)
    h = self.bconv2_4(h)
    h = F.dropout(F.max_pooling_2d(h, 2), 0.25)
    h = self.bconv3_1(h)
    h = F.dropout(h, 0.25)
    h = self.bconv3_2(h)
    h = F.dropout(h, 0.25)
    h = self.bconv3_3(h)
    h = F.dropout(h, 0.25)
    h = self.bconv3_4(h)
    h = F.dropout(h, 0.25)
    h = self.bconv3_5(h)
    h = F.dropout(h, 0.25)
    h = self.bconv3_6(h)
    h = F.dropout(h, 0.25)
    h = self.bconv3_7(h)
    h = F.dropout(h, 0.25)
    h = self.bconv3_8(h)
    h = F.dropout(F.max_pooling_2d(h, 2), 0.25)
    h = F.average_pooling_2d(h, 4, 1, 0)
    h = self.fc(F.dropout(h))
    return h

def __call__(self, x):
    sh, sw = self.conv1_1.stride
    c_out, c_in, kh, kw = self.conv1_1.W.data.shape
    b, c, hh, ww = x.data.shape
    if sh == 1 and sw == 1:
        shape_out = (b, c_out, hh, ww)
    else:
        hh = (hh + 2 - kh) // sh + 1
        ww = (ww + 2 - kw) // sw + 1
        shape_out = (b, c_out, hh, ww)
    h = x
    if x.data.shape != shape_out:
        # Zero-pad channels and downsample the shortcut so it matches the
        # two shake-shake branches.
        xp = chainer.cuda.get_array_module(x.data)
        n, c, hh, ww = x.data.shape
        pad_c = shape_out[1] - c
        p = xp.zeros((n, pad_c, hh, ww), dtype=xp.float32)
        x = F.concat((p, x))
        if x.data.shape[2:] != shape_out[2:]:
            x = F.average_pooling_2d(x, 1, 2)
    h1 = self.bn1_1(self.conv1_1(h))
    h2 = self.bn2_1(self.conv2_1(h))
    if self.activation1 is not None:
        h1 = self.activation1(h1)
        h2 = self.activation1(h2)
    h1 = self.bn1_2(self.conv1_2(h1))
    h2 = self.bn2_2(self.conv2_2(h2))
    h = shake_shake(h1, h2) + x
    if self.activation2 is not None:
        return self.activation2(h)
    else:
        return h

def forward(self, x):
    h = F.relu(self.conv1(x))
    h = F.local_response_normalization(
        F.max_pooling_2d(h, 3, stride=2), n=5)
    h = F.relu(self.conv2_reduce(h))
    h = F.relu(self.conv2(h))
    h = F.max_pooling_2d(
        F.local_response_normalization(h, n=5), 3, stride=2)

    h = self.inc3a(h)
    h = self.inc3b(h)
    h = F.max_pooling_2d(h, 3, stride=2)
    h = self.inc4a(h)

    l = F.average_pooling_2d(h, 5, stride=3)
    l = F.relu(self.loss1_conv(l))
    l = F.relu(self.loss1_fc1(l))
    l = self.loss1_fc2(l)
    loss1 = l

    h = self.inc4b(h)
    h = self.inc4c(h)
    h = self.inc4d(h)

    l = F.average_pooling_2d(h, 5, stride=3)
    l = F.relu(self.loss2_conv(l))
    l = F.relu(self.loss2_fc1(l))
    l = self.loss2_fc2(l)
    loss2 = l

    h = self.inc4e(h)
    h = F.max_pooling_2d(h, 3, stride=2)
    h = self.inc5a(h)
    h = self.inc5b(h)

    h = F.average_pooling_2d(h, 7, stride=1)
    h = self.loss3_fc(F.dropout(h, 0.4, train=self.train))
    loss3 = h

    return loss1, loss2, loss3

def __call__(self, x, t):
    h = self.bn1(self.conv1(x))
    h = F.max_pooling_2d(F.relu(h), 3, stride=2)
    h = self.res2(h)
    h = self.res3(h)
    h = self.res4(h)
    h = self.res5(h)
    h = F.average_pooling_2d(h, 7, stride=1)
    h = self.fc(h)
    loss = F.softmax_cross_entropy(h, t)
    chainer.report({'loss': loss, 'accuracy': F.accuracy(h, t)}, self)
    return loss

def __call__(self, x, t):
    self.clear()
    h = self.bn1(self.conv1(x), test=not self.train)
    h = F.max_pooling_2d(F.relu(h), 3, stride=2)
    h = self.res2(h, self.train)
    h = self.res3(h, self.train)
    h = self.res4(h, self.train)
    h = self.res5(h, self.train)
    h = F.average_pooling_2d(h, 7, stride=1)
    h = self.fc(h)
    loss = F.softmax_cross_entropy(h, t)
    chainer.report({'loss': loss, 'accuracy': F.accuracy(h, t)}, self)
    return loss

def __call__(self, x_0, x_1):
    h = F.concat([self.c0_0(x_0), self.c0_1(x_1)])
    h = self.c1(h)
    h = self.c2(h)
    h = self.c3(h)
    h = self.c4(h)
    # h = F.average_pooling_2d(h, h.data.shape[2], 1, 0)
    return h