The following 37 code examples, extracted from open-source Python projects, illustrate how to use lasagne.layers.Pool2DLayer().
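Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below; the input shape and variable names are purely illustrative) showing how a Pool2DLayer is typically constructed and how its output shape can be checked:

import theano.tensor as T
from lasagne.layers import InputLayer, Pool2DLayer, get_output_shape

# A 4D input batch: (batch size, channels, height, width)
input_var = T.tensor4('inputs')
l_in = InputLayer(shape=(None, 3, 64, 64), input_var=input_var)

# 2x2 max pooling with stride 2 halves both spatial dimensions
l_pool = Pool2DLayer(l_in, pool_size=2, stride=2, mode='max')

print(get_output_shape(l_pool))  # (None, 3, 32, 32)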
def inceptionA(input_layer, nfilt):
    # Corresponds to a modified version of figure 5 in the paper
    l1 = bn_conv(input_layer, num_filters=nfilt[0][0], filter_size=1)

    l2 = bn_conv(input_layer, num_filters=nfilt[1][0], filter_size=1)
    l2 = bn_conv(l2, num_filters=nfilt[1][1], filter_size=5, pad=2)

    l3 = bn_conv(input_layer, num_filters=nfilt[2][0], filter_size=1)
    l3 = bn_conv(l3, num_filters=nfilt[2][1], filter_size=3, pad=1)
    l3 = bn_conv(l3, num_filters=nfilt[2][2], filter_size=3, pad=1)

    l4 = Pool2DLayer(
        input_layer, pool_size=3, stride=1, pad=1, mode='average_exc_pad')
    l4 = bn_conv(l4, num_filters=nfilt[3][0], filter_size=1)

    return ConcatLayer([l1, l2, l3, l4])
def inceptionC(input_layer, nfilt):
    # Corresponds to figure 6 in the paper
    l1 = bn_conv(input_layer, num_filters=nfilt[0][0], filter_size=1)

    l2 = bn_conv(input_layer, num_filters=nfilt[1][0], filter_size=1)
    l2 = bn_conv(l2, num_filters=nfilt[1][1], filter_size=(1, 7), pad=(0, 3))
    l2 = bn_conv(l2, num_filters=nfilt[1][2], filter_size=(7, 1), pad=(3, 0))

    l3 = bn_conv(input_layer, num_filters=nfilt[2][0], filter_size=1)
    l3 = bn_conv(l3, num_filters=nfilt[2][1], filter_size=(7, 1), pad=(3, 0))
    l3 = bn_conv(l3, num_filters=nfilt[2][2], filter_size=(1, 7), pad=(0, 3))
    l3 = bn_conv(l3, num_filters=nfilt[2][3], filter_size=(7, 1), pad=(3, 0))
    l3 = bn_conv(l3, num_filters=nfilt[2][4], filter_size=(1, 7), pad=(0, 3))

    l4 = Pool2DLayer(
        input_layer, pool_size=3, stride=1, pad=1, mode='average_exc_pad')
    l4 = bn_conv(l4, num_filters=nfilt[3][0], filter_size=1)

    return ConcatLayer([l1, l2, l3, l4])
def inceptionE(input_layer, nfilt, pool_mode):
    # Corresponds to figure 7 in the paper
    l1 = bn_conv(input_layer, num_filters=nfilt[0][0], filter_size=1)

    l2 = bn_conv(input_layer, num_filters=nfilt[1][0], filter_size=1)
    l2a = bn_conv(l2, num_filters=nfilt[1][1], filter_size=(1, 3), pad=(0, 1))
    l2b = bn_conv(l2, num_filters=nfilt[1][2], filter_size=(3, 1), pad=(1, 0))

    l3 = bn_conv(input_layer, num_filters=nfilt[2][0], filter_size=1)
    l3 = bn_conv(l3, num_filters=nfilt[2][1], filter_size=3, pad=1)
    l3a = bn_conv(l3, num_filters=nfilt[2][2], filter_size=(1, 3), pad=(0, 1))
    l3b = bn_conv(l3, num_filters=nfilt[2][3], filter_size=(3, 1), pad=(1, 0))

    l4 = Pool2DLayer(
        input_layer, pool_size=3, stride=1, pad=1, mode=pool_mode)
    l4 = bn_conv(l4, num_filters=nfilt[3][0], filter_size=1)

    return ConcatLayer([l1, l2a, l2b, l3a, l3b, l4])
def inceptionA(self, input_layer, nfilt):
    # Corresponds to a modified version of figure 5 in the paper
    l1 = self.bn_conv(input_layer, num_filters=nfilt[0][0], filter_size=1)

    l2 = self.bn_conv(input_layer, num_filters=nfilt[1][0], filter_size=1)
    l2 = self.bn_conv(l2, num_filters=nfilt[1][1], filter_size=5, pad=2)

    l3 = self.bn_conv(input_layer, num_filters=nfilt[2][0], filter_size=1)
    l3 = self.bn_conv(l3, num_filters=nfilt[2][1], filter_size=3, pad=1)
    l3 = self.bn_conv(l3, num_filters=nfilt[2][2], filter_size=3, pad=1)

    l4 = Pool2DLayer(
        input_layer, pool_size=3, stride=1, pad=1, mode='average_exc_pad')
    l4 = self.bn_conv(l4, num_filters=nfilt[3][0], filter_size=1)

    return ConcatLayer([l1, l2, l3, l4])
def inceptionC(self, input_layer, nfilt):
    # Corresponds to figure 6 in the paper
    l1 = self.bn_conv(input_layer, num_filters=nfilt[0][0], filter_size=1)

    l2 = self.bn_conv(input_layer, num_filters=nfilt[1][0], filter_size=1)
    l2 = self.bn_conv(l2, num_filters=nfilt[1][1], filter_size=(1, 7), pad=(0, 3))
    l2 = self.bn_conv(l2, num_filters=nfilt[1][2], filter_size=(7, 1), pad=(3, 0))

    l3 = self.bn_conv(input_layer, num_filters=nfilt[2][0], filter_size=1)
    l3 = self.bn_conv(l3, num_filters=nfilt[2][1], filter_size=(7, 1), pad=(3, 0))
    l3 = self.bn_conv(l3, num_filters=nfilt[2][2], filter_size=(1, 7), pad=(0, 3))
    l3 = self.bn_conv(l3, num_filters=nfilt[2][3], filter_size=(7, 1), pad=(3, 0))
    l3 = self.bn_conv(l3, num_filters=nfilt[2][4], filter_size=(1, 7), pad=(0, 3))

    l4 = Pool2DLayer(
        input_layer, pool_size=3, stride=1, pad=1, mode='average_exc_pad')
    l4 = self.bn_conv(l4, num_filters=nfilt[3][0], filter_size=1)

    return ConcatLayer([l1, l2, l3, l4])
def inceptionE(self, input_layer, nfilt, pool_mode):
    # Corresponds to figure 7 in the paper
    l1 = self.bn_conv(input_layer, num_filters=nfilt[0][0], filter_size=1)

    l2 = self.bn_conv(input_layer, num_filters=nfilt[1][0], filter_size=1)
    l2a = self.bn_conv(l2, num_filters=nfilt[1][1], filter_size=(1, 3), pad=(0, 1))
    l2b = self.bn_conv(l2, num_filters=nfilt[1][2], filter_size=(3, 1), pad=(1, 0))

    l3 = self.bn_conv(input_layer, num_filters=nfilt[2][0], filter_size=1)
    l3 = self.bn_conv(l3, num_filters=nfilt[2][1], filter_size=3, pad=1)
    l3a = self.bn_conv(l3, num_filters=nfilt[2][2], filter_size=(1, 3), pad=(0, 1))
    l3b = self.bn_conv(l3, num_filters=nfilt[2][3], filter_size=(3, 1), pad=(1, 0))

    l4 = Pool2DLayer(
        input_layer, pool_size=3, stride=1, pad=1, mode=pool_mode)
    l4 = self.bn_conv(l4, num_filters=nfilt[3][0], filter_size=1)

    return ConcatLayer([l1, l2a, l2b, l3a, l3b, l4])
def __init__(self, incoming, pool_size, stride=None, pad=(0, 0),
             ignore_border=True, centered=True, **kwargs):
    """A padded pooling layer

    Parameters
    ----------
    incoming : lasagne.layers.Layer
        The input layer
    pool_size : int
        The size of the pooling
    stride : int or iterable of int
        The stride or subsampling of the convolution
    pad : int, iterable of int, ``full``, ``same`` or ``valid``
        **Ignored!** Kept for compatibility with the
        :class:``lasagne.layers.Pool2DLayer``
    ignore_border : bool
        See :class:``lasagne.layers.Pool2DLayer``
    centered : bool
        If True, the padding will be added on both sides. If False
        the zero padding will be applied on the upper left side.
    **kwargs
        Any additional keyword arguments are passed to the Layer superclass
    """
    self.centered = centered
    if pad not in [0, (0, 0), [0, 0]]:
        warnings.warn('The specified padding will be ignored',
                      RuntimeWarning)
    super(PaddedPool2DLayer, self).__init__(incoming,
                                            pool_size,
                                            stride,
                                            pad,
                                            ignore_border,
                                            **kwargs)
    if self.input_shape[2:] != (None, None):
        warnings.warn('This Layer should only be used when the size of '
                      'the image is not known', RuntimeWarning)
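A hedged usage sketch for the class whose constructor is shown above; it assumes PaddedPool2DLayer subclasses lasagne.layers.Pool2DLayer and is applied to inputs of unknown spatial size, as the warnings in __init__ suggest:

from lasagne.layers import InputLayer

# Illustrative only: the spatial dimensions are left as None, which is
# the case this layer is designed for (see the warning in __init__ above).
l_in = InputLayer((None, 3, None, None))
l_pool = PaddedPool2DLayer(l_in, pool_size=2, stride=2, centered=True)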
def setup_perceptual(self, input):
    """Use lasagne to create a network of convolution layers using
    pre-trained VGG19 weights.
    """
    offset = np.array([103.939, 116.779, 123.680],
                      dtype=np.float32).reshape((1,3,1,1))
    self.network['percept'] = lasagne.layers.NonlinearityLayer(
        input, lambda x: ((x+0.5)*255.0) - offset)

    self.network['mse'] = self.network['percept']
    self.network['conv1_1'] = ConvLayer(self.network['percept'], 64, 3, pad=1)
    self.network['conv1_2'] = ConvLayer(self.network['conv1_1'], 64, 3, pad=1)
    self.network['pool1'] = PoolLayer(self.network['conv1_2'], 2, mode='max')
    self.network['conv2_1'] = ConvLayer(self.network['pool1'], 128, 3, pad=1)
    self.network['conv2_2'] = ConvLayer(self.network['conv2_1'], 128, 3, pad=1)
    self.network['pool2'] = PoolLayer(self.network['conv2_2'], 2, mode='max')
    self.network['conv3_1'] = ConvLayer(self.network['pool2'], 256, 3, pad=1)
    self.network['conv3_2'] = ConvLayer(self.network['conv3_1'], 256, 3, pad=1)
    self.network['conv3_3'] = ConvLayer(self.network['conv3_2'], 256, 3, pad=1)
    self.network['conv3_4'] = ConvLayer(self.network['conv3_3'], 256, 3, pad=1)
    self.network['pool3'] = PoolLayer(self.network['conv3_4'], 2, mode='max')
    self.network['conv4_1'] = ConvLayer(self.network['pool3'], 512, 3, pad=1)
    self.network['conv4_2'] = ConvLayer(self.network['conv4_1'], 512, 3, pad=1)
    self.network['conv4_3'] = ConvLayer(self.network['conv4_2'], 512, 3, pad=1)
    self.network['conv4_4'] = ConvLayer(self.network['conv4_3'], 512, 3, pad=1)
    self.network['pool4'] = PoolLayer(self.network['conv4_4'], 2, mode='max')
    self.network['conv5_1'] = ConvLayer(self.network['pool4'], 512, 3, pad=1)
    self.network['conv5_2'] = ConvLayer(self.network['conv5_1'], 512, 3, pad=1)
    self.network['conv5_3'] = ConvLayer(self.network['conv5_2'], 512, 3, pad=1)
    self.network['conv5_4'] = ConvLayer(self.network['conv5_3'], 512, 3, pad=1)
def inceptionB(input_layer, nfilt):
    # Corresponds to a modified version of figure 10 in the paper
    l1 = bn_conv(input_layer, num_filters=nfilt[0][0], filter_size=3, stride=2)

    l2 = bn_conv(input_layer, num_filters=nfilt[1][0], filter_size=1)
    l2 = bn_conv(l2, num_filters=nfilt[1][1], filter_size=3, pad=1)
    l2 = bn_conv(l2, num_filters=nfilt[1][2], filter_size=3, stride=2)

    l3 = Pool2DLayer(input_layer, pool_size=3, stride=2)

    return ConcatLayer([l1, l2, l3])
def inceptionD(input_layer, nfilt):
    # Corresponds to a modified version of figure 10 in the paper
    l1 = bn_conv(input_layer, num_filters=nfilt[0][0], filter_size=1)
    l1 = bn_conv(l1, num_filters=nfilt[0][1], filter_size=3, stride=2)

    l2 = bn_conv(input_layer, num_filters=nfilt[1][0], filter_size=1)
    l2 = bn_conv(l2, num_filters=nfilt[1][1], filter_size=(1, 7), pad=(0, 3))
    l2 = bn_conv(l2, num_filters=nfilt[1][2], filter_size=(7, 1), pad=(3, 0))
    l2 = bn_conv(l2, num_filters=nfilt[1][3], filter_size=3, stride=2)

    l3 = Pool2DLayer(input_layer, pool_size=3, stride=2)

    return ConcatLayer([l1, l2, l3])
def build_cnn(k_height=1, k_width=25, input_var=None):
    # Input layer, as usual:
    l_in = InputLayer(shape=(None, 1, 4, 512), input_var=input_var)

    l_conv1 = Conv2DLayer(incoming=l_in, num_filters=8,
                          filter_size=(k_height, k_width),
                          stride=1, pad='same',
                          W=lasagne.init.Normal(std=0.02),
                          nonlinearity=lasagne.nonlinearities.very_leaky_rectify)

    l_pool1 = Pool2DLayer(incoming=l_conv1, pool_size=(2,2), stride=(2,2))

    l_drop1 = lasagne.layers.dropout(l_pool1, p=.75)

    l_fc = lasagne.layers.DenseLayer(
        l_drop1,
        num_units=50,
        nonlinearity=lasagne.nonlinearities.rectify)

    l_drop2 = lasagne.layers.dropout(l_fc, p=.75)

    l_out = lasagne.layers.DenseLayer(
        l_drop2,
        num_units=2,
        nonlinearity=lasagne.nonlinearities.softmax)

    return l_out


# ############################# Batch iterator ###############################
# This is just a simple helper function iterating over training data in
# mini-batches of a particular size, optionally in random order. It assumes
# data is available as numpy arrays. For big datasets, you could load numpy
# arrays as memory-mapped files (np.load(..., mmap_mode='r')), or write your
# own custom data iteration function. For small datasets, you can also copy
# them to GPU at once for slightly improved performance. This would involve
# several changes in the main program, though, and is not demonstrated here.
# Notice that this function returns only mini-batches of size `batchsize`.
# If the size of the data is not a multiple of `batchsize`, it will not
# return the last (remaining) mini-batch.
def build_cnn(k_height=1, k_width=25, input_var=None):
    # Input layer, as usual:
    l_in = InputLayer(shape=(None, 1, 30, 512), input_var=input_var)

    l_conv1 = Conv2DLayer(incoming=l_in, num_filters=8,
                          filter_size=(k_height, k_width),
                          stride=1, pad='same',
                          W=lasagne.init.Normal(std=0.02),
                          nonlinearity=lasagne.nonlinearities.very_leaky_rectify)

    l_pool1 = Pool2DLayer(incoming=l_conv1, pool_size=(3,3), stride=(3,3))

    l_drop1 = lasagne.layers.dropout(l_pool1, p=.75)

    l_fc = lasagne.layers.DenseLayer(
        l_drop1,
        num_units=50,
        nonlinearity=lasagne.nonlinearities.rectify)

    l_drop2 = lasagne.layers.dropout(l_fc, p=.75)

    l_out = lasagne.layers.DenseLayer(
        l_drop2,
        num_units=2,
        nonlinearity=lasagne.nonlinearities.softmax)

    return l_out

# (The same "Batch iterator" helper comment as in the previous example follows in the original source.)
def build_cnn(k_height=1, k_width=25, input_var=None):
    # Input layer, as usual:
    l_in = InputLayer(shape=(None, 5, NUM_ELECTRODES, 512), input_var=input_var)

    l_conv1 = Conv2DLayer(incoming=l_in, num_filters=8,
                          filter_size=(k_height, k_width),
                          stride=1, pad='same',
                          W=lasagne.init.Normal(std=0.02),
                          nonlinearity=lasagne.nonlinearities.very_leaky_rectify)

    l_pool1 = Pool2DLayer(incoming=l_conv1, pool_size=(2,2), stride=(2,2))

    l_drop1 = lasagne.layers.dropout(l_pool1, p=.75)

    l_fc = lasagne.layers.DenseLayer(
        l_drop1,
        num_units=50,
        nonlinearity=lasagne.nonlinearities.rectify)

    l_drop2 = lasagne.layers.dropout(l_fc, p=.75)

    l_out = lasagne.layers.DenseLayer(
        l_drop2,
        num_units=2,
        nonlinearity=lasagne.nonlinearities.softmax)

    return l_out

# (The same "Batch iterator" helper comment as above follows in the original source.)
def build_cnn(k_height=3, k_width=3, input_var=None):
    # Input layer, as usual:
    l_in = InputLayer(shape=(None, 5, 30, 30), input_var=input_var)

    l_conv1 = Conv2DLayer(incoming=l_in, num_filters=8,
                          filter_size=(k_height, k_width),
                          stride=1, pad='same',
                          W=lasagne.init.Normal(std=0.02),
                          nonlinearity=lasagne.nonlinearities.very_leaky_rectify)

    l_pool1 = Pool2DLayer(incoming=l_conv1, pool_size=(2,2), stride=(2,2))

    l_drop1 = lasagne.layers.dropout(l_pool1, p=.75)

    l_fc = lasagne.layers.DenseLayer(
        l_drop1,
        num_units=50,
        nonlinearity=lasagne.nonlinearities.rectify)

    l_drop2 = lasagne.layers.dropout(l_fc, p=.75)

    l_out = lasagne.layers.DenseLayer(
        l_drop2,
        num_units=2,
        nonlinearity=lasagne.nonlinearities.softmax)

    return l_out

# (The same "Batch iterator" helper comment as above follows in the original source.)
def build_cnn(input_var=None):
    # Input layer, as usual:
    l_in = InputLayer(shape=(None, 1, 64, 512), input_var=input_var)

    l_conv1 = Conv2DLayer(incoming=l_in, num_filters=8,
                          filter_size=(3,3),
                          stride=1, pad='same',
                          W=lasagne.init.Normal(std=0.02),
                          nonlinearity=lasagne.nonlinearities.very_leaky_rectify)

    l_pool1 = Pool2DLayer(incoming=l_conv1, pool_size=2, stride=2)

    l_drop1 = lasagne.layers.dropout(l_pool1, p=.75)

    l_fc = lasagne.layers.DenseLayer(
        l_drop1,
        num_units=50,
        nonlinearity=lasagne.nonlinearities.rectify)

    l_drop2 = lasagne.layers.dropout(l_fc, p=.75)

    l_out = lasagne.layers.DenseLayer(
        l_drop2,
        num_units=2,
        nonlinearity=lasagne.nonlinearities.softmax)

    return l_out

# (The same "Batch iterator" helper comment as above follows in the original source.)
def build_cnn(k_height, k_width, input_var=None):
    # Input layer, as usual:
    l_in = InputLayer(shape=(None, 1, 64, 512), input_var=input_var)

    l_conv1 = Conv2DLayer(incoming=l_in, num_filters=8,
                          filter_size=(k_height, k_width),
                          stride=1, pad='same',
                          W=lasagne.init.Normal(std=0.02),
                          nonlinearity=lasagne.nonlinearities.very_leaky_rectify)

    l_pool1 = Pool2DLayer(incoming=l_conv1, pool_size=2, stride=2)

    l_drop1 = lasagne.layers.dropout(l_pool1, p=.75)

    l_fc = lasagne.layers.DenseLayer(
        l_drop1,
        num_units=50,
        nonlinearity=lasagne.nonlinearities.rectify)

    l_drop2 = lasagne.layers.dropout(l_fc, p=.75)

    l_out = lasagne.layers.DenseLayer(
        l_drop2,
        num_units=2,
        nonlinearity=lasagne.nonlinearities.softmax)

    return l_out

# (The same "Batch iterator" helper comment as above follows in the original source.)
def build_cnn(input_var=None):
    # Input layer, as usual:
    l_in = InputLayer(shape=(None, 1, 64, 512), input_var=input_var)

    l_conv1 = Conv2DLayer(incoming=l_in, num_filters=8,
                          filter_size=3,
                          stride=1, pad='same',
                          W=lasagne.init.Normal(std=0.02),
                          nonlinearity=lasagne.nonlinearities.very_leaky_rectify)

    l_pool1 = Pool2DLayer(incoming=l_conv1, pool_size=2, stride=2)

    # A fully-connected layer
    l_fc = lasagne.layers.DenseLayer(
        l_pool1,
        num_units=512,
        nonlinearity=lasagne.nonlinearities.rectify)

    l_out = lasagne.layers.DenseLayer(
        l_fc,
        num_units=2,
        nonlinearity=lasagne.nonlinearities.softmax)

    return l_out

# (The same "Batch iterator" helper comment as above follows in the original source.)
def build_cnn(input_var=None):
    # Input layer, as usual:
    l_in = InputLayer(shape=(None, 1, 64, 512), input_var=input_var)

    l_conv1 = Conv2DLayer(incoming=l_in, num_filters=8,
                          filter_size=(3,3),
                          stride=1, pad='same',
                          W=lasagne.init.Normal(std=0.02),
                          nonlinearity=lasagne.nonlinearities.very_leaky_rectify)

    l_pool1 = Pool2DLayer(incoming=l_conv1, pool_size=2, stride=2)

    # A fully-connected layer
    l_fc = lasagne.layers.DenseLayer(
        l_pool1,
        num_units=512,
        nonlinearity=lasagne.nonlinearities.rectify)

    l_out = lasagne.layers.DenseLayer(
        l_fc,
        num_units=2,
        nonlinearity=lasagne.nonlinearities.softmax)

    return l_out

# (The same "Batch iterator" helper comment as above follows in the original source.)
def setup_perceptual(self, input):
    offset = np.array([103.939, 116.779, 123.680],
                      dtype=np.float32).reshape((1,3,1,1))
    self.network['percept'] = lasagne.layers.NonlinearityLayer(
        input, lambda x: ((x+0.5)*255.0) - offset)

    self.network['mse'] = self.network['percept']
    self.network['conv1_1'] = ConvLayer(self.network['percept'], 64, 3, pad=1)
    self.network['conv1_2'] = ConvLayer(self.network['conv1_1'], 64, 3, pad=1)
    self.network['pool1'] = PoolLayer(self.network['conv1_2'], 2, mode='max')
    self.network['conv2_1'] = ConvLayer(self.network['pool1'], 128, 3, pad=1)
    self.network['conv2_2'] = ConvLayer(self.network['conv2_1'], 128, 3, pad=1)
    self.network['pool2'] = PoolLayer(self.network['conv2_2'], 2, mode='max')
    self.network['conv3_1'] = ConvLayer(self.network['pool2'], 256, 3, pad=1)
    self.network['conv3_2'] = ConvLayer(self.network['conv3_1'], 256, 3, pad=1)
    self.network['conv3_3'] = ConvLayer(self.network['conv3_2'], 256, 3, pad=1)
    self.network['conv3_4'] = ConvLayer(self.network['conv3_3'], 256, 3, pad=1)
    self.network['pool3'] = PoolLayer(self.network['conv3_4'], 2, mode='max')
    self.network['conv4_1'] = ConvLayer(self.network['pool3'], 512, 3, pad=1)
    self.network['conv4_2'] = ConvLayer(self.network['conv4_1'], 512, 3, pad=1)
    self.network['conv4_3'] = ConvLayer(self.network['conv4_2'], 512, 3, pad=1)
    self.network['conv4_4'] = ConvLayer(self.network['conv4_3'], 512, 3, pad=1)
    self.network['pool4'] = PoolLayer(self.network['conv4_4'], 2, mode='max')
    self.network['conv5_1'] = ConvLayer(self.network['pool4'], 512, 3, pad=1)
    self.network['conv5_2'] = ConvLayer(self.network['conv5_1'], 512, 3, pad=1)
    self.network['conv5_3'] = ConvLayer(self.network['conv5_2'], 512, 3, pad=1)
    self.network['conv5_4'] = ConvLayer(self.network['conv5_3'], 512, 3, pad=1)
def setup_loss_net(self):
    """
    Create a network of convolution layers based on the VGG16 architecture
    from the paper:
    "Very Deep Convolutional Networks for Large-Scale Image Recognition"

    Original source: https://gist.github.com/ksimonyan/211839e770f7b538e2d8
    License: see http://www.robots.ox.ac.uk/~vgg/research/very_deep/

    Based on code in the Lasagne Recipes repository:
    https://github.com/Lasagne/Recipes
    """
    loss_net = self.network['loss_net']
    loss_net['input'] = InputLayer(shape=self.shape)
    loss_net['conv1_1'] = ConvLayer(loss_net['input'], 64, 3, pad=1, flip_filters=False)
    loss_net['conv1_2'] = ConvLayer(loss_net['conv1_1'], 64, 3, pad=1, flip_filters=False)
    loss_net['pool1'] = PoolLayer(loss_net['conv1_2'], 2)
    loss_net['conv2_1'] = ConvLayer(loss_net['pool1'], 128, 3, pad=1, flip_filters=False)
    loss_net['conv2_2'] = ConvLayer(loss_net['conv2_1'], 128, 3, pad=1, flip_filters=False)
    loss_net['pool2'] = PoolLayer(loss_net['conv2_2'], 2)
    loss_net['conv3_1'] = ConvLayer(loss_net['pool2'], 256, 3, pad=1, flip_filters=False)
    loss_net['conv3_2'] = ConvLayer(loss_net['conv3_1'], 256, 3, pad=1, flip_filters=False)
    loss_net['conv3_3'] = ConvLayer(loss_net['conv3_2'], 256, 3, pad=1, flip_filters=False)
    loss_net['pool3'] = PoolLayer(loss_net['conv3_3'], 2)
    loss_net['conv4_1'] = ConvLayer(loss_net['pool3'], 512, 3, pad=1, flip_filters=False)
    loss_net['conv4_2'] = ConvLayer(loss_net['conv4_1'], 512, 3, pad=1, flip_filters=False)
    loss_net['conv4_3'] = ConvLayer(loss_net['conv4_2'], 512, 3, pad=1, flip_filters=False)
    loss_net['pool4'] = PoolLayer(loss_net['conv4_3'], 2)
    loss_net['conv5_1'] = ConvLayer(loss_net['pool4'], 512, 3, pad=1, flip_filters=False)
    loss_net['conv5_2'] = ConvLayer(loss_net['conv5_1'], 512, 3, pad=1, flip_filters=False)
    loss_net['conv5_3'] = ConvLayer(loss_net['conv5_2'], 512, 3, pad=1, flip_filters=False)
def build_model(input_var):
    net = {}
    net['input'] = InputLayer((None, 3, 224, 224), input_var=input_var)
    net['conv1_1'] = ConvLayer(net['input'], 64, 3, pad=1, flip_filters=False)
    net['conv1_2'] = ConvLayer(net['conv1_1'], 64, 3, pad=1, flip_filters=False)
    net['pool1'] = PoolLayer(net['conv1_2'], 2)
    net['conv2_1'] = ConvLayer(net['pool1'], 128, 3, pad=1, flip_filters=False)
    net['conv2_2'] = ConvLayer(net['conv2_1'], 128, 3, pad=1, flip_filters=False)
    net['pool2'] = PoolLayer(net['conv2_2'], 2)
    net['conv3_1'] = ConvLayer(net['pool2'], 256, 3, pad=1, flip_filters=False)
    net['conv3_2'] = ConvLayer(net['conv3_1'], 256, 3, pad=1, flip_filters=False)
    net['conv3_3'] = ConvLayer(net['conv3_2'], 256, 3, pad=1, flip_filters=False)
    net['pool3'] = PoolLayer(net['conv3_3'], 2)
    net['conv4_1'] = ConvLayer(net['pool3'], 512, 3, pad=1, flip_filters=False)
    net['conv4_2'] = ConvLayer(net['conv4_1'], 512, 3, pad=1, flip_filters=False)
    net['conv4_3'] = ConvLayer(net['conv4_2'], 512, 3, pad=1, flip_filters=False)
    net['pool4'] = PoolLayer(net['conv4_3'], 2)
    net['conv5_1'] = ConvLayer(net['pool4'], 512, 3, pad=1, flip_filters=False)
    net['conv5_2'] = ConvLayer(net['conv5_1'], 512, 3, pad=1, flip_filters=False)
    net['conv5_3'] = ConvLayer(net['conv5_2'], 512, 3, pad=1, flip_filters=False)
    net['pool5'] = PoolLayer(net['conv5_3'], 2)
    net['fc6'] = DenseLayer(net['pool5'], num_units=4096)
    net['fc6_dropout'] = DropoutLayer(net['fc6'], p=0.5)
    net['fc7'] = DenseLayer(net['fc6_dropout'], num_units=4096)
    net['fc7_dropout'] = DropoutLayer(net['fc7'], p=0.5)
    net['fc8'] = DenseLayer(net['fc7_dropout'], num_units=1000, nonlinearity=None)
    net['prob'] = NonlinearityLayer(net['fc8'], softmax)

    return net
def build_vgg(shape, input_var):
    net = {}
    w, h = shape
    net['input'] = InputLayer((None, 3, w, h), input_var=input_var)
    net['conv1_1'] = ConvLayer(net['input'], 64, 3, pad=1)
    net['conv1_2'] = ConvLayer(net['conv1_1'], 64, 3, pad=1)
    net['pool1'] = PoolLayer(net['conv1_2'], 2, mode='average_exc_pad')
    net['conv2_1'] = ConvLayer(net['pool1'], 128, 3, pad=1)
    net['conv2_2'] = ConvLayer(net['conv2_1'], 128, 3, pad=1)
    net['pool2'] = PoolLayer(net['conv2_2'], 2, mode='average_exc_pad')
    net['conv3_1'] = ConvLayer(net['pool2'], 256, 3, pad=1)
    net['conv3_2'] = ConvLayer(net['conv3_1'], 256, 3, pad=1)
    net['conv3_3'] = ConvLayer(net['conv3_2'], 256, 3, pad=1)
    net['conv3_4'] = ConvLayer(net['conv3_3'], 256, 3, pad=1)
    net['pool3'] = PoolLayer(net['conv3_4'], 2, mode='average_exc_pad')
    net['conv4_1'] = ConvLayer(net['pool3'], 512, 3, pad=1)
    net['conv4_2'] = ConvLayer(net['conv4_1'], 512, 3, pad=1)
    net['conv4_3'] = ConvLayer(net['conv4_2'], 512, 3, pad=1)
    net['conv4_4'] = ConvLayer(net['conv4_3'], 512, 3, pad=1)
    net['pool4'] = PoolLayer(net['conv4_4'], 2, mode='average_exc_pad')
    net['conv5_1'] = ConvLayer(net['pool4'], 512, 3, pad=1)
    net['conv5_2'] = ConvLayer(net['conv5_1'], 512, 3, pad=1)
    net['conv5_3'] = ConvLayer(net['conv5_2'], 512, 3, pad=1)
    net['conv5_4'] = ConvLayer(net['conv5_3'], 512, 3, pad=1)
    net['pool5'] = PoolLayer(net['conv5_4'], 2, mode='average_exc_pad')

    return net
def inceptionB(self, input_layer, nfilt):
    # Corresponds to a modified version of figure 10 in the paper
    l1 = self.bn_conv(input_layer, num_filters=nfilt[0][0], filter_size=3, stride=2)

    l2 = self.bn_conv(input_layer, num_filters=nfilt[1][0], filter_size=1)
    l2 = self.bn_conv(l2, num_filters=nfilt[1][1], filter_size=3, pad=1)
    l2 = self.bn_conv(l2, num_filters=nfilt[1][2], filter_size=3, stride=2)

    l3 = Pool2DLayer(input_layer, pool_size=3, stride=2)

    return ConcatLayer([l1, l2, l3])
def inceptionD(self, input_layer, nfilt):
    # Corresponds to a modified version of figure 10 in the paper
    l1 = self.bn_conv(input_layer, num_filters=nfilt[0][0], filter_size=1)
    l1 = self.bn_conv(l1, num_filters=nfilt[0][1], filter_size=3, stride=2)

    l2 = self.bn_conv(input_layer, num_filters=nfilt[1][0], filter_size=1)
    l2 = self.bn_conv(l2, num_filters=nfilt[1][1], filter_size=(1, 7), pad=(0, 3))
    l2 = self.bn_conv(l2, num_filters=nfilt[1][2], filter_size=(7, 1), pad=(3, 0))
    l2 = self.bn_conv(l2, num_filters=nfilt[1][3], filter_size=3, stride=2)

    l3 = Pool2DLayer(input_layer, pool_size=3, stride=2)

    return ConcatLayer([l1, l2, l3])
def setup_model(self, input=None):
    """Use lasagne to create a network of convolution layers, first using
    VGG19 as the framework and then adding augmentations for Semantic
    Style Transfer.
    """
    net, self.channels = {}, {}

    # Primary network for the main image. These are convolution only,
    # and stop at layer 4_2 (rest unused).
    net['img'] = input or InputLayer((None, 3, None, None))
    net['conv1_1'] = ConvLayer(net['img'], 64, 3, pad=1)
    net['conv1_2'] = ConvLayer(net['conv1_1'], 64, 3, pad=1)
    net['pool1'] = PoolLayer(net['conv1_2'], 2, mode='average_exc_pad')
    net['conv2_1'] = ConvLayer(net['pool1'], 128, 3, pad=1)
    net['conv2_2'] = ConvLayer(net['conv2_1'], 128, 3, pad=1)
    net['pool2'] = PoolLayer(net['conv2_2'], 2, mode='average_exc_pad')
    net['conv3_1'] = ConvLayer(net['pool2'], 256, 3, pad=1)
    net['conv3_2'] = ConvLayer(net['conv3_1'], 256, 3, pad=1)
    net['conv3_3'] = ConvLayer(net['conv3_2'], 256, 3, pad=1)
    net['conv3_4'] = ConvLayer(net['conv3_3'], 256, 3, pad=1)
    net['pool3'] = PoolLayer(net['conv3_4'], 2, mode='average_exc_pad')
    net['conv4_1'] = ConvLayer(net['pool3'], 512, 3, pad=1)
    net['conv4_2'] = ConvLayer(net['conv4_1'], 512, 3, pad=1)
    net['conv4_3'] = ConvLayer(net['conv4_2'], 512, 3, pad=1)
    net['conv4_4'] = ConvLayer(net['conv4_3'], 512, 3, pad=1)
    net['pool4'] = PoolLayer(net['conv4_4'], 2, mode='average_exc_pad')
    net['conv5_1'] = ConvLayer(net['pool4'], 512, 3, pad=1)
    net['conv5_2'] = ConvLayer(net['conv5_1'], 512, 3, pad=1)
    net['conv5_3'] = ConvLayer(net['conv5_2'], 512, 3, pad=1)
    net['conv5_4'] = ConvLayer(net['conv5_3'], 512, 3, pad=1)
    net['main'] = net['conv5_4']

    # Auxiliary network for the semantic layers, and the nearest neighbors calculations.
    net['map'] = InputLayer((1, 1, None, None))
    for j, i in itertools.product(range(5), range(4)):
        if j < 2 and i > 1: continue
        suffix = '%i_%i' % (j+1, i+1)

        if i == 0:
            net['map%i' % (j+1)] = PoolLayer(net['map'], 2**j, mode='average_exc_pad')
        self.channels[suffix] = net['conv'+suffix].num_filters

        if args.semantic_weight > 0.0:
            net['sem'+suffix] = ConcatLayer([net['conv'+suffix], net['map%i' % (j+1)]])
        else:
            net['sem'+suffix] = net['conv'+suffix]

        net['dup'+suffix] = InputLayer(net['sem'+suffix].output_shape)
        net['nn'+suffix] = ConvLayer(net['dup'+suffix], 1, 3, b=None, pad=0, flip_filters=False)

    self.network = net
def build_model_resnet50(input_shape):
    net = {}
    net['input'] = InputLayer(input_shape)
    sub_net, parent_layer_name = build_simple_block(
        net['input'], ['conv1', 'bn_conv1', 'conv1_relu'],
        64, 7, 2, 3, use_bias=True)
    net.update(sub_net)
    net['pool1'] = PoolLayer(net[parent_layer_name], pool_size=3, stride=2, pad=0,
                             mode='max', ignore_border=False)

    block_size = list('abc')
    parent_layer_name = 'pool1'
    for c in block_size:
        if c == 'a':
            sub_net, parent_layer_name = build_residual_block(net[parent_layer_name], 1, 1, True, 4, ix='2%s' % c)
        else:
            sub_net, parent_layer_name = build_residual_block(net[parent_layer_name], 1.0/4, 1, False, 4, ix='2%s' % c)
        net.update(sub_net)

    # block_size = ['a'] + ['b'+str(i+1) for i in range(7)]
    block_size = list('abcd')
    for c in block_size:
        if c == 'a':
            sub_net, parent_layer_name = build_residual_block(
                net[parent_layer_name], 1.0/2, 1.0/2, True, 4, ix='3%s' % c)
        else:
            sub_net, parent_layer_name = build_residual_block(net[parent_layer_name], 1.0/4, 1, False, 4, ix='3%s' % c)
        net.update(sub_net)

    # block_size = ['a'] + ['b'+str(i+1) for i in range(35)]
    block_size = list('abcdef')
    for c in block_size:
        if c == 'a':
            sub_net, parent_layer_name = build_residual_block(
                net[parent_layer_name], 1.0/2, 1.0/2, True, 4, ix='4%s' % c)
        else:
            sub_net, parent_layer_name = build_residual_block(net[parent_layer_name], 1.0/4, 1, False, 4, ix='4%s' % c)
        net.update(sub_net)

    block_size = list('abc')
    for c in block_size:
        if c == 'a':
            sub_net, parent_layer_name = build_residual_block(
                net[parent_layer_name], 1.0/2, 1.0/2, True, 4, ix='5%s' % c)
        else:
            sub_net, parent_layer_name = build_residual_block(net[parent_layer_name], 1.0/4, 1, False, 4, ix='5%s' % c)
        net.update(sub_net)

    net['pool5'] = PoolLayer(net[parent_layer_name], pool_size=7, stride=1, pad=0,
                             mode='average_exc_pad', ignore_border=False)
    net['fc1000'] = DenseLayer(net['pool5'], num_units=1000, nonlinearity=None,
                               W=lasagne.init.Normal(std=0.01, mean=0.0))
    net['prob'] = NonlinearityLayer(net['fc1000'], nonlinearity=softmax)

    return net


# model hyperparams
def build_model_resnet152(input_shape):
    net = {}
    net['input'] = InputLayer(input_shape)
    sub_net, parent_layer_name = build_simple_block(
        net['input'], ['conv1', 'bn_conv1', 'conv1_relu'],
        64, 7, 2, 3, use_bias=True)
    net.update(sub_net)
    net['pool1'] = PoolLayer(net[parent_layer_name], pool_size=3, stride=2, pad=0,
                             mode='max', ignore_border=False)

    block_size = list('abc')
    parent_layer_name = 'pool1'
    for c in block_size:
        if c == 'a':
            sub_net, parent_layer_name = build_residual_block(net[parent_layer_name], 1, 1, True, 4, ix='2%s' % c)
        else:
            sub_net, parent_layer_name = build_residual_block(net[parent_layer_name], 1.0/4, 1, False, 4, ix='2%s' % c)
        net.update(sub_net)

    block_size = ['a'] + ['b'+str(i+1) for i in range(7)]  # block_size = list('abcd')
    for c in block_size:
        if c == 'a':
            sub_net, parent_layer_name = build_residual_block(
                net[parent_layer_name], 1.0/2, 1.0/2, True, 4, ix='3%s' % c)
        else:
            sub_net, parent_layer_name = build_residual_block(net[parent_layer_name], 1.0/4, 1, False, 4, ix='3%s' % c)
        net.update(sub_net)

    block_size = ['a'] + ['b'+str(i+1) for i in range(35)]  # block_size = list('abcdef')
    for c in block_size:
        if c == 'a':
            sub_net, parent_layer_name = build_residual_block(
                net[parent_layer_name], 1.0/2, 1.0/2, True, 4, ix='4%s' % c)
        else:
            sub_net, parent_layer_name = build_residual_block(net[parent_layer_name], 1.0/4, 1, False, 4, ix='4%s' % c)
        net.update(sub_net)

    block_size = list('abc')
    for c in block_size:
        if c == 'a':
            sub_net, parent_layer_name = build_residual_block(
                net[parent_layer_name], 1.0/2, 1.0/2, True, 4, ix='5%s' % c)
        else:
            sub_net, parent_layer_name = build_residual_block(net[parent_layer_name], 1.0/4, 1, False, 4, ix='5%s' % c)
        net.update(sub_net)

    net['pool5'] = PoolLayer(net[parent_layer_name], pool_size=7, stride=1, pad=0,
                             mode='average_exc_pad', ignore_border=False)
    net['fc1000'] = DenseLayer(net['pool5'], num_units=1000, nonlinearity=None)
    net['prob'] = NonlinearityLayer(net['fc1000'], nonlinearity=softmax)

    print('Total number of layers:', len(lasagne.layers.get_all_layers(net['prob'])))

    return net


# model hyperparams
def build(input_height, input_width, concat_var):
    """
    Build the discriminator, all weights initialized from scratch
    :param input_width:
    :param input_height:
    :param concat_var: Theano symbolic tensor variable
    :return: Dictionary that contains the discriminator
    """
    net = {'input': InputLayer((None, 4, input_height, input_width), input_var=concat_var)}
    print "Input: {}".format(net['input'].output_shape[1:])
    net['merge'] = ConvLayer(net['input'], 3, 1, pad=0, flip_filters=False)
    print "merge: {}".format(net['merge'].output_shape[1:])
    net['conv1'] = ConvLayer(net['merge'], 32, 3, pad=1)
    print "conv1: {}".format(net['conv1'].output_shape[1:])
    net['pool1'] = PoolLayer(net['conv1'], 4)
    print "pool1: {}".format(net['pool1'].output_shape[1:])
    net['conv2_1'] = ConvLayer(net['pool1'], 64, 3, pad=1)
    print "conv2_1: {}".format(net['conv2_1'].output_shape[1:])
    net['conv2_2'] = ConvLayer(net['conv2_1'], 64, 3, pad=1)
    print "conv2_2: {}".format(net['conv2_2'].output_shape[1:])
    net['pool2'] = PoolLayer(net['conv2_2'], 2)
    print "pool2: {}".format(net['pool2'].output_shape[1:])
    net['conv3_1'] = nn.weight_norm(ConvLayer(net['pool2'], 64, 3, pad=1))
    print "conv3_1: {}".format(net['conv3_1'].output_shape[1:])
    net['conv3_2'] = nn.weight_norm(ConvLayer(net['conv3_1'], 64, 3, pad=1))
    print "conv3_2: {}".format(net['conv3_2'].output_shape[1:])
    net['pool3'] = PoolLayer(net['conv3_2'], 2)
    print "pool3: {}".format(net['pool3'].output_shape[1:])
    net['fc4'] = DenseLayer(net['pool3'], num_units=100, nonlinearity=tanh)
    print "fc4: {}".format(net['fc4'].output_shape[1:])
    net['fc5'] = DenseLayer(net['fc4'], num_units=2, nonlinearity=tanh)
    print "fc5: {}".format(net['fc5'].output_shape[1:])
    net['prob'] = DenseLayer(net['fc5'], num_units=1, nonlinearity=sigmoid)
    print "prob: {}".format(net['prob'].output_shape[1:])

    return net
def build_resnet():
    net = {}
    net['input'] = InputLayer((None, 3, 224, 224))
    sub_net, parent_layer_name = build_simple_block(
        net['input'], ['conv1', 'bn_conv1', 'conv1_relu'],
        64, 7, 2, 3, use_bias=True)
    net.update(sub_net)
    net['pool1'] = PoolLayer(net[parent_layer_name], pool_size=3, stride=2, pad=0,
                             mode='max', ignore_border=False)

    block_size = list('abc')
    parent_layer_name = 'pool1'
    for c in block_size:
        if c == 'a':
            sub_net, parent_layer_name = build_residual_block(net[parent_layer_name], 1, 1, True, 4, ix='2%s' % c)
        else:
            sub_net, parent_layer_name = build_residual_block(net[parent_layer_name], 1.0/4, 1, False, 4, ix='2%s' % c)
        net.update(sub_net)

    block_size = list('abcd')
    for c in block_size:
        if c == 'a':
            sub_net, parent_layer_name = build_residual_block(net[parent_layer_name], 1.0/2, 1.0/2, True, 4, ix='3%s' % c)
        else:
            sub_net, parent_layer_name = build_residual_block(net[parent_layer_name], 1.0/4, 1, False, 4, ix='3%s' % c)
        net.update(sub_net)

    block_size = list('abcdef')
    for c in block_size:
        if c == 'a':
            sub_net, parent_layer_name = build_residual_block(net[parent_layer_name], 1.0/2, 1.0/2, True, 4, ix='4%s' % c)
        else:
            sub_net, parent_layer_name = build_residual_block(net[parent_layer_name], 1.0/4, 1, False, 4, ix='4%s' % c)
        net.update(sub_net)

    block_size = list('abc')
    for c in block_size:
        if c == 'a':
            sub_net, parent_layer_name = build_residual_block(net[parent_layer_name], 1.0/2, 1.0/2, True, 4, ix='5%s' % c)
        else:
            sub_net, parent_layer_name = build_residual_block(net[parent_layer_name], 1.0/4, 1, False, 4, ix='5%s' % c)
        net.update(sub_net)

    net['pool5'] = PoolLayer(net[parent_layer_name], pool_size=7, stride=1, pad=0,
                             mode='average_exc_pad', ignore_border=False)
    net['fc1000'] = DenseLayer(net['pool5'], num_units=1000, nonlinearity=None)
    net['prob'] = NonlinearityLayer(net['fc1000'], nonlinearity=softmax)

    return net
def build_model(input_size):
    net = {}
    net['input'] = InputLayer(input_size)
    sub_net, parent_layer_name = build_simple_block(
        net['input'], ['conv1', 'bn_conv1', 'conv1_relu'],
        64, 7, 2, 3, use_bias=True)
    net.update(sub_net)
    net['pool1'] = PoolLayer(net[parent_layer_name], pool_size=3, stride=2, pad=0,
                             mode='max', ignore_border=False)

    block_size = list('abc')
    parent_layer_name = 'pool1'
    for c in block_size:
        if c == 'a':
            sub_net, parent_layer_name = build_residual_block(net[parent_layer_name], 1, 1, True, 4, ix='2%s' % c)
        else:
            sub_net, parent_layer_name = build_residual_block(net[parent_layer_name], 1.0/4, 1, False, 4, ix='2%s' % c)
        net.update(sub_net)

    block_size = list('abcd')
    for c in block_size:
        if c == 'a':
            sub_net, parent_layer_name = build_residual_block(
                net[parent_layer_name], 1.0/2, 1.0/2, True, 4, ix='3%s' % c)
        else:
            sub_net, parent_layer_name = build_residual_block(net[parent_layer_name], 1.0/4, 1, False, 4, ix='3%s' % c)
        net.update(sub_net)

    block_size = list('abcdef')
    for c in block_size:
        if c == 'a':
            sub_net, parent_layer_name = build_residual_block(
                net[parent_layer_name], 1.0/2, 1.0/2, True, 4, ix='4%s' % c)
        else:
            sub_net, parent_layer_name = build_residual_block(net[parent_layer_name], 1.0/4, 1, False, 4, ix='4%s' % c)
        net.update(sub_net)

    block_size = list('abc')
    for c in block_size:
        if c == 'a':
            sub_net, parent_layer_name = build_residual_block(
                net[parent_layer_name], 1.0/2, 1.0/2, True, 4, ix='5%s' % c)
        else:
            sub_net, parent_layer_name = build_residual_block(net[parent_layer_name], 1.0/4, 1, False, 4, ix='5%s' % c)
        net.update(sub_net)

    net['pool5'] = PoolLayer(net[parent_layer_name], pool_size=7, stride=1, pad=0,
                             mode='average_exc_pad', ignore_border=False)

    return net


# Setup the original network