The following code examples, extracted from open-source Python projects, illustrate how to use chainer.functions.local_response_normalization().
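For reference, the function normalizes each activation across nearby channels: y_i = x_i / (k + alpha * sum_j x_j^2)^beta, where the sum runs over a window of n channels centered on channel i, and the defaults are n=5, k=2, alpha=1e-4, beta=0.75 (the AlexNet-style setting). Below is a minimal self-contained sketch of a plain call; the input shape is an arbitrary illustrative choice.

import numpy as np
import chainer.functions as F

# Illustrative input: batch of 2, 16 channels, 8x8 spatial maps.
x = np.random.randn(2, 16, 8, 8).astype(np.float32)

# Apply LRN with the default parameters (n=5, k=2, alpha=1e-4, beta=0.75).
y = F.local_response_normalization(x)
print(y.shape)  # (2, 16, 8, 8) -- the output shape matches the input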
def __call__(self, x, t):
    self.clear()
    h = F.max_pooling_2d(F.relu(
        F.local_response_normalization(self.conv1(x))), 3, stride=2)
    h = F.max_pooling_2d(F.relu(
        F.local_response_normalization(self.conv2(h))), 3, stride=2)
    h = F.relu(self.conv3(h))
    h = F.relu(self.conv4(h))
    h = F.max_pooling_2d(F.relu(self.conv5(h)), 3, stride=2)
    h = F.dropout(F.relu(self.fc6(h)), train=self.train)
    h = F.dropout(F.relu(self.fc7(h)), train=self.train)
    h = self.fc8(h)

    self.loss = F.softmax_cross_entropy(h, t)
    self.accuracy = F.accuracy(h, t)
    return self.loss
def check_forward(self, x_data):
    x = chainer.Variable(x_data)
    y = functions.local_response_normalization(x)
    self.assertEqual(y.data.dtype, self.dtype)
    y_data = cuda.to_cpu(y.data)

    # Naive implementation with the default parameters
    # (n=5, k=2, alpha=1e-4, beta=0.75) on a 7-channel input.
    # The n=5 window spans channels c-2..c+2, so the exclusive
    # upper bound of the inner loop is c+3.
    y_expect = numpy.zeros_like(self.x)
    for n, c, h, w in numpy.ndindex(self.x.shape):
        s = 0
        for i in six.moves.range(max(0, c - 2), min(7, c + 3)):
            s += self.x[n, i, h, w] ** 2
        denom = (2 + 1e-4 * s) ** .75
        y_expect[n, c, h, w] = self.x[n, c, h, w] / denom

    gradient_check.assert_allclose(
        y_expect, y_data, **self.check_forward_options)
def __call__(self, x, depth=1):
    assert 1 <= depth <= self.n_encdec
    h = F.local_response_normalization(x, 5, 1, 0.0005, 0.75)

    # Unchain the inner EncDecs after the given depth
    encdec = getattr(self, 'encdec{}'.format(depth))
    encdec.inside = None
    h = self.encdec1(h, train=self.train)
    h = self.conv_cls(h)
    return h
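Note that the example above passes the LRN parameters positionally. The signature is local_response_normalization(x, n=5, k=2, alpha=1e-4, beta=0.75), so this call uses a window of n=5 with k=1, alpha=5e-4, and beta=0.75; the SegNet-style example further below passes its parameters the same way.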
def __call__(self, x, y, t):
    self.clear()

    # First stream (R)
    hR = F.max_pooling_2d(F.relu(
        F.local_response_normalization(self.convR1(x))), 3, stride=2)
    hR = F.max_pooling_2d(F.relu(
        F.local_response_normalization(self.convR2(hR))), 3, stride=2)
    hR = F.relu(self.convR3(hR))
    hR = F.relu(self.convR4(hR))
    hR = F.max_pooling_2d(F.relu(self.convR5(hR)), 3, stride=2)
    hR = F.dropout(F.relu(self.fcR6(hR)), train=self.train)
    hR = F.dropout(F.relu(self.fcR7(hR)), train=self.train)

    # Second stream (D)
    hD = F.max_pooling_2d(F.relu(
        F.local_response_normalization(self.convD1(y))), 3, stride=2)
    hD = F.max_pooling_2d(F.relu(
        F.local_response_normalization(self.convD2(hD))), 3, stride=2)
    hD = F.relu(self.convD3(hD))
    hD = F.relu(self.convD4(hD))
    hD = F.max_pooling_2d(F.relu(self.convD5(hD)), 3, stride=2)
    hD = F.dropout(F.relu(self.fcD6(hD)), train=self.train)
    hD = F.dropout(F.relu(self.fcD7(hD)), train=self.train)

    # Fuse the two streams
    h = F.dropout(F.relu(self.fc8(hR, hD)), train=self.train)
    h = self.fc9(h)

    self.loss = F.softmax_cross_entropy(h, t)
    self.accuracy = F.accuracy(h, t)
    return self.loss
def _setup_lrn(self, layer):
    param = layer.lrn_param
    if param.norm_region != param.ACROSS_CHANNELS:
        raise RuntimeError('Within-channel LRN is not supported')

    fwd = _SingleArgumentFunction(
        functions.local_response_normalization,
        n=param.local_size, k=param.k,
        alpha=param.alpha / param.local_size,
        beta=param.beta)
    self.forwards[layer.name] = fwd
    self._add_layer(layer)
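This converter divides alpha by local_size because Caffe's across-channel LRN averages the squared activations over the window, computing (k + (alpha/n) * sum_j x_j^2)^beta, whereas Chainer sums them. A minimal sketch, with made-up parameter values, checking that rescaling against a naive Caffe-style reference:

import numpy as np
import chainer.functions as F

# Made-up Caffe-style LRN parameters, for illustration only.
local_size, k, alpha, beta = 5, 1.0, 2e-4, 0.75
x = np.random.randn(1, 8, 6, 6).astype(np.float32)

# Chainer call with alpha rescaled exactly as in the converter above.
y = F.local_response_normalization(
    x, n=local_size, k=k, alpha=alpha / local_size, beta=beta).data

# Naive Caffe-style reference: denom = (k + alpha/n * windowed sum)^beta.
y_ref = np.empty_like(x)
half = local_size // 2
for c in range(x.shape[1]):
    lo, hi = max(0, c - half), min(x.shape[1], c + half + 1)
    s = (x[:, lo:hi] ** 2).sum(axis=1)
    y_ref[:, c] = x[:, c] / (k + alpha / local_size * s) ** beta

assert np.allclose(y, y_ref, atol=1e-5)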
def __call__(self, x, subtract_mean=True):
    if subtract_mean:
        x = x - self._image_mean
#        h = super(ModifiedGoogLeNet, self).__call__(
#            x, layers=['pool5'], train=train)['pool5']
#        h = self.bn_fc(h, test=not train)
#        y = self.fc(h)
#        return y
    h = F.relu(self.conv1(x))
    h = F.max_pooling_2d(h, 3, stride=2)
    h = F.local_response_normalization(h, n=5, k=1, alpha=1e-4 / 5)
    h = F.relu(self.conv2_reduce(h))
    h = F.relu(self.conv2(h))
    h = F.local_response_normalization(h, n=5, k=1, alpha=1e-4 / 5)
    h = F.max_pooling_2d(h, 3, stride=2)
    h = self.inc3a(h)
    h = self.inc3b(h)
    h = F.max_pooling_2d(h, 3, stride=2)
    h = self.inc4a(h)
    h = self.inc4b(h)
    h = self.inc4c(h)
    h = self.inc4d(h)
    h = self.inc4e(h)
    h = F.max_pooling_2d(h, 3, stride=2)
    h = self.inc5a(h)
    h = self.inc5b(h)
    h = F.average_pooling_2d(h, 7, stride=1)
    h = self.bn_fc(h)
    y = self.fc(h)
    if self.normalize_output:
        y = F.normalize(y)
    return y
def __call__(self, x):
    """Compute an image-wise score from a batch of images

    Args:
        x (chainer.Variable): A variable with 4D image array.

    Returns:
        chainer.Variable:
            An image-wise score. Its channel size is :obj:`self.n_class`.

    """
    p1 = F.MaxPooling2D(2, 2)
    p2 = F.MaxPooling2D(2, 2)
    p3 = F.MaxPooling2D(2, 2)
    p4 = F.MaxPooling2D(2, 2)
    h = F.local_response_normalization(x, 5, 1, 1e-4 / 5., 0.75)
    h = _pool_without_cudnn(p1, F.relu(self.conv1_bn(self.conv1(h))))
    h = _pool_without_cudnn(p2, F.relu(self.conv2_bn(self.conv2(h))))
    h = _pool_without_cudnn(p3, F.relu(self.conv3_bn(self.conv3(h))))
    h = _pool_without_cudnn(p4, F.relu(self.conv4_bn(self.conv4(h))))
    h = self._upsampling_2d(h, p4)
    h = self.conv_decode4_bn(self.conv_decode4(h))
    h = self._upsampling_2d(h, p3)
    h = self.conv_decode3_bn(self.conv_decode3(h))
    h = self._upsampling_2d(h, p2)
    h = self.conv_decode2_bn(self.conv_decode2(h))
    h = self._upsampling_2d(h, p1)
    h = self.conv_decode1_bn(self.conv_decode1(h))
    score = self.conv_classifier(h)
    return score
def forward(self, x):
    h = F.relu(self.conv1(x))
    h = F.local_response_normalization(
        F.max_pooling_2d(h, 3, stride=2), n=5)
    h = F.relu(self.conv2_reduce(h))
    h = F.relu(self.conv2(h))
    h = F.max_pooling_2d(
        F.local_response_normalization(h, n=5), 3, stride=2)

    h = self.inc3a(h)
    h = self.inc3b(h)
    h = F.max_pooling_2d(h, 3, stride=2)
    h = self.inc4a(h)

    # First auxiliary classifier branch
    l = F.average_pooling_2d(h, 5, stride=3)
    l = F.relu(self.loss1_conv(l))
    l = F.relu(self.loss1_fc1(l))
    l = self.loss1_fc2(l)
    loss1 = l

    h = self.inc4b(h)
    h = self.inc4c(h)
    h = self.inc4d(h)

    # Second auxiliary classifier branch
    l = F.average_pooling_2d(h, 5, stride=3)
    l = F.relu(self.loss2_conv(l))
    l = F.relu(self.loss2_fc1(l))
    l = self.loss2_fc2(l)
    loss2 = l

    h = self.inc4e(h)
    h = F.max_pooling_2d(h, 3, stride=2)
    h = self.inc5a(h)
    h = self.inc5b(h)
    h = F.average_pooling_2d(h, 7, stride=1)
    h = self.loss3_fc(F.dropout(h, 0.4, train=self.train))
    loss3 = h
    return loss1, loss2, loss3
def forward(self, x):
    y1 = self.model['conv1/7x7_s2'](x)
    h = F.relu(y1)
    h = F.local_response_normalization(
        self.pool_func(h, 3, stride=2), n=5)
    h = F.relu(self.model['conv2/3x3_reduce'](h))
    y2 = self.model['conv2/3x3'](h)
    h = F.relu(y2)
    h = self.pool_func(
        F.local_response_normalization(h, n=5), 3, stride=2)

    out1 = self.model['inception_3a/1x1'](h)
    out3 = self.model['inception_3a/3x3'](
        F.relu(self.model['inception_3a/3x3_reduce'](h)))
    out5 = self.model['inception_3a/5x5'](
        F.relu(self.model['inception_3a/5x5_reduce'](h)))
    pool = self.model['inception_3a/pool_proj'](
        self.pool_func(h, 3, stride=1, pad=1))
    y3 = F.concat((out1, out3, out5, pool), axis=1)
    h = F.relu(y3)

    out1 = self.model['inception_3b/1x1'](h)
    out3 = self.model['inception_3b/3x3'](
        F.relu(self.model['inception_3b/3x3_reduce'](h)))
    out5 = self.model['inception_3b/5x5'](
        F.relu(self.model['inception_3b/5x5_reduce'](h)))
    pool = self.model['inception_3b/pool_proj'](
        self.pool_func(h, 3, stride=1, pad=1))
    y4 = F.concat((out1, out3, out5, pool), axis=1)
    h = F.relu(y4)

    h = self.pool_func(h, 3, stride=2)

    out1 = self.model['inception_4a/1x1'](h)
    out3 = self.model['inception_4a/3x3'](
        F.relu(self.model['inception_4a/3x3_reduce'](h)))
    out5 = self.model['inception_4a/5x5'](
        F.relu(self.model['inception_4a/5x5_reduce'](h)))
    pool = self.model['inception_4a/pool_proj'](
        self.pool_func(h, 3, stride=1, pad=1))
    y5 = F.concat((out1, out3, out5, pool), axis=1)
    h = F.relu(y5)

    out1 = self.model['inception_4b/1x1'](h)
    out3 = self.model['inception_4b/3x3'](
        F.relu(self.model['inception_4b/3x3_reduce'](h)))
    out5 = self.model['inception_4b/5x5'](
        F.relu(self.model['inception_4b/5x5_reduce'](h)))
    pool = self.model['inception_4b/pool_proj'](
        self.pool_func(h, 3, stride=1, pad=1))
    y6 = F.concat((out1, out3, out5, pool), axis=1)
    h = F.relu(y6)
    return [y1, y2, y3, y4, y5, y6]
def predict(self, x):
    h = F.max_pooling_2d(F.relu(
        F.local_response_normalization(self.conv1(x))), 3, stride=2)
    h = F.max_pooling_2d(F.relu(
        F.local_response_normalization(self.conv2(h))), 3, stride=2)
    h = F.relu(self.conv3(h))
    h = F.relu(self.conv4(h))
    h = F.max_pooling_2d(F.relu(self.conv5(h)), 3, stride=2)
    h = F.dropout(F.relu(self.fc6(h)), train=self.train)
    h = F.dropout(F.relu(self.fc7(h)), train=self.train)
    h = F.dropout(F.relu(self.fc8(h)), train=self.train)
    h = self.fc9(h)
    return h
def l2norm(self, x):
    """Force embeddings onto a hypersphere.

    This has been done in Schroff et al. "FaceNet: A Unified Embedding
    for Face Recognition and Clustering." arXiv:1503.03832 [cs],
    March 12, 2015. http://arxiv.org/abs/1503.03832.
    However, it did not work out very well for me, so this function is
    currently not used.
    """
    return F.local_response_normalization(
        x, n=x.data.shape[1] * 2, k=0, alpha=1, beta=0.5)
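The trick works because a window of n = 2*C is guaranteed to cover every channel, and with k=0, alpha=1, beta=0.5 the denominator reduces to (sum_j x_j^2)^0.5, the channel-wise L2 norm. A quick sketch, with made-up shapes, verifying the equivalence against NumPy:

import numpy as np
import chainer.functions as F

# Made-up embeddings: 2 samples, 10 dimensions, laid out as NxCx1x1.
x = np.random.randn(2, 10, 1, 1).astype(np.float32)

# n = 2*C covers all channels; k=0, alpha=1, beta=0.5 gives x / ||x||_2.
y = F.local_response_normalization(
    x, n=x.shape[1] * 2, k=0, alpha=1, beta=0.5).data

y_ref = x / np.sqrt((x ** 2).sum(axis=1, keepdims=True))
assert np.allclose(y, y_ref, atol=1e-5)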
def __call__(self, x, t):
    h = F.max_pooling_2d(F.relu(
        F.local_response_normalization(self.conv1(x))), 3, stride=2)
    h = F.max_pooling_2d(F.relu(
        F.local_response_normalization(self.conv2(h))), 3, stride=2)
    h = F.relu(self.conv3(h))
    h = F.relu(self.conv4(h))
    h = F.max_pooling_2d(F.relu(self.conv5(h)), 3, stride=1)
    h = F.dropout(F.relu(self.fc6(h)), train=self.train)
    h = F.dropout(F.relu(self.fc7(h)), train=self.train)
    h = self.fc8(h)

    self.loss = F.softmax_cross_entropy(h, t)
    self.accuracy = F.accuracy(h, t)
    return self.loss
def __call__(self, x, t):
    h = F.max_pooling_2d(F.relu(
        F.local_response_normalization(self.conv1(x))), 2, stride=2)
    h = F.max_pooling_2d(F.relu(
        F.local_response_normalization(self.conv2(h))), 2, stride=2)
    h = F.dropout(F.relu(self.conv3(h)), ratio=0.7, train=self.train)
    h = F.max_pooling_2d(F.relu(self.conv4(h)), 2, stride=2)
    h = F.max_pooling_2d(
        F.relu(self.conv5(h)), 2, stride=2, cover_all=True)
    h = F.dropout(F.relu(self.fc6(h)), ratio=0.7, train=self.train)
    h = F.dropout(F.relu(self.fc7(h)), ratio=0.7, train=self.train)
    h = self.fc8(h)

    self.loss = F.softmax_cross_entropy(h, t)
    self.accuracy = F.accuracy(h, t)
    return self.loss
def __call__(self, x, t):
    self.clear()
    h = F.relu(self.conv1(x))
    h = F.local_response_normalization(
        F.max_pooling_2d(h, 3, stride=2), n=5)
    h = F.relu(self.conv2_reduce(h))
    h = F.relu(self.conv2(h))
    h = F.max_pooling_2d(
        F.local_response_normalization(h, n=5), 3, stride=2)

    h = self.inc3a(h)
    h = self.inc3b(h)
    h = F.max_pooling_2d(h, 3, stride=2)
    h = self.inc4a(h)

    l = F.average_pooling_2d(h, 5, stride=3)
    l = F.relu(self.loss1_conv(l))
    l = F.relu(self.loss1_fc1(l))
    l = self.loss1_fc2(l)
    self.loss1 = F.softmax_cross_entropy(l, t)

    h = self.inc4b(h)
    h = self.inc4c(h)
    h = self.inc4d(h)

    l = F.average_pooling_2d(h, 5, stride=3)
    l = F.relu(self.loss2_conv(l))
    l = F.relu(self.loss2_fc1(l))
    l = self.loss2_fc2(l)
    self.loss2 = F.softmax_cross_entropy(l, t)

    h = self.inc4e(h)
    h = F.max_pooling_2d(h, 3, stride=2)
    h = self.inc5a(h)
    h = self.inc5b(h)
    h = F.average_pooling_2d(h, 7, stride=1)
    h = self.loss3_fc(F.dropout(h, 0.4, train=self.train))
    self.loss3 = F.softmax_cross_entropy(h, t)

    # Weighted sum of the two auxiliary losses and the main loss
    self.loss = 0.3 * (self.loss1 + self.loss2) + self.loss3
    self.accuracy = F.accuracy(h, t)
    return self.loss
def __call__(self, x, t):
    h = F.relu(self.conv1(x))
    h = F.local_response_normalization(
        F.max_pooling_2d(h, 3, stride=2), n=5)
    h = F.relu(self.conv2_reduce(h))
    h = F.relu(self.conv2(h))
    h = F.max_pooling_2d(
        F.local_response_normalization(h, n=5), 3, stride=2)

    h = self.inc3a(h)
    h = self.inc3b(h)
    h = F.max_pooling_2d(h, 3, stride=2)
    h = self.inc4a(h)

    l = F.average_pooling_2d(h, 5, stride=3)
    l = F.relu(self.loss1_conv(l))
    l = F.relu(self.loss1_fc1(l))
    l = self.loss1_fc2(l)
    loss1 = F.softmax_cross_entropy(l, t)

    h = self.inc4b(h)
    h = self.inc4c(h)
    h = self.inc4d(h)

    l = F.average_pooling_2d(h, 5, stride=3)
    l = F.relu(self.loss2_conv(l))
    l = F.relu(self.loss2_fc1(l))
    l = self.loss2_fc2(l)
    loss2 = F.softmax_cross_entropy(l, t)

    h = self.inc4e(h)
    h = F.max_pooling_2d(h, 3, stride=2)
    h = self.inc5a(h)
    h = self.inc5b(h)
    h = F.average_pooling_2d(h, 7, stride=1)
    h = self.loss3_fc(F.dropout(h, 0.4, train=self.train))
    loss3 = F.softmax_cross_entropy(h, t)

    loss = 0.3 * (loss1 + loss2) + loss3
    accuracy = F.accuracy(h, t)

    chainer.report({
        'loss': loss,
        'loss1': loss1,
        'loss2': loss2,
        'loss3': loss3,
        'accuracy': accuracy
    }, self)
    return loss
def __call__(self, x, t):
    h = F.relu(self.conv1(x))
    h = F.local_response_normalization(
        F.max_pooling_2d(h, 3, stride=2), n=5)
    h = F.relu(self.conv2_reduce(h))
    h = F.relu(self.conv2(h))
    h = F.max_pooling_2d(
        F.local_response_normalization(h, n=5), 3, stride=2)

    h = self.inc3a(h)
    h = self.inc3b(h)
    h = F.max_pooling_2d(h, 3, stride=2)
    h = self.inc4a(h)

    l = F.average_pooling_2d(h, 5, stride=3)
    l = F.relu(self.loss1_conv(l))
    l = F.relu(self.loss1_fc1(l))
    l = self.loss1_fc2(l)
    loss1 = F.softmax_cross_entropy(l, t)

    h = self.inc4b(h)
    h = self.inc4c(h)
    h = self.inc4d(h)

    l = F.average_pooling_2d(h, 5, stride=3)
    l = F.relu(self.loss2_conv(l))
    l = F.relu(self.loss2_fc1(l))
    l = self.loss2_fc2(l)
    loss2 = F.softmax_cross_entropy(l, t)

    h = self.inc4e(h)
    h = F.max_pooling_2d(h, 3, stride=2)
    h = self.inc5a(h)
    h = self.inc5b(h)
    h = F.average_pooling_2d(h, 7, stride=1)
    h = self.loss3_fc(F.dropout(h, 0.4))
    loss3 = F.softmax_cross_entropy(h, t)

    loss = 0.3 * (loss1 + loss2) + loss3
    accuracy = F.accuracy(h, t)

    chainer.report({
        'loss': loss,
        'loss1': loss1,
        'loss2': loss2,
        'loss3': loss3,
        'accuracy': accuracy
    }, self)
    return loss
def __call__(self, x, t):
    h = F.relu(self.conv1(x))
    h = F.local_response_normalization(
        F.max_pooling_2d(h, 3, stride=2), n=5)
    h = F.relu(self.conv2_reduce(h))
    h = F.relu(self.conv2(h))
    h = F.max_pooling_2d(
        F.local_response_normalization(h, n=5), 3, stride=2)

    h = self.inc3a(h)
    h = self.inc3b(h)
    h = F.max_pooling_2d(h, 3, stride=2)
    h = self.inc4a(h)

    l = F.average_pooling_2d(h, 5, stride=3)
    l = F.relu(self.loss1_conv(l))
    l = F.relu(self.loss1_fc1(l))
    l = self.loss1_fc2(l)
    loss1 = F.softmax_cross_entropy(l, t)

    h = self.inc4b(h)
    h = self.inc4c(h)
    h = self.inc4d(h)

    l = F.average_pooling_2d(h, 5, stride=3)
    l = F.relu(self.loss2_conv(l))
    l = F.relu(self.loss2_fc1(l))
    l = self.loss2_fc2(l)
    loss2 = F.softmax_cross_entropy(l, t)

    h = self.inc4e(h)
    h = F.max_pooling_2d(h, 3, stride=2)
    h = self.inc5a(h)
    h = self.inc5b(h)
    h = F.average_pooling_2d(h, 7, stride=1)
    h = self.loss3_fc(F.dropout(h, 0.4))
    loss3 = F.softmax_cross_entropy(h, t)

    loss = 0.3 * (loss1 + loss2) + loss3
    return loss