The following 50 code examples, extracted from open-source Python projects, illustrate how to use theano.tensor.signal.downsample.max_pool_2d().
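Before the project examples, here is a minimal self-contained sketch of the call itself. This assumes a pre-0.9 Theano release, where the downsample module still exists (later releases moved the op to theano.tensor.signal.pool.pool_2d):

import numpy as np
import theano
import theano.tensor as T
from theano.tensor.signal import downsample

x = T.tensor4('x')  # (batch, channels, height, width)
# non-overlapping 2x2 max-pooling; ignore_border=True drops any partial window
pooled = downsample.max_pool_2d(input=x, ds=(2, 2), ignore_border=True)
pool_fn = theano.function([x], pooled)

img = np.arange(16, dtype=theano.config.floatX).reshape(1, 1, 4, 4)
print(pool_fn(img).shape)  # (1, 1, 2, 2)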
def convpool(X, W, b, poolsize=(2, 2)):
    conv_out = conv2d(input=X, filters=W)
    # downsample each feature map individually, using maxpooling
    pooled_out = downsample.max_pool_2d(input=conv_out, ds=poolsize, ignore_border=True)
    # add the bias term. Since the bias is a vector (1D array), we first
    # reshape it to a tensor of shape (1, n_filters, 1, 1). Each bias will
    # thus be broadcasted across mini-batches and feature map
    # width & height
    # return T.tanh(pooled_out + b.dimshuffle('x', 0, 'x', 'x'))
    return relu(pooled_out + b.dimshuffle('x', 0, 'x', 'x'))
def predict(self, new_data, batch_size):
    """
    predict for new data
    """
    img_shape = (batch_size, 1, self.image_shape[2], self.image_shape[3])
    conv_out = conv.conv2d(input=new_data, filters=self.W,
                           filter_shape=self.filter_shape, image_shape=img_shape)
    if self.non_linear == "tanh":
        conv_out_tanh = T.tanh(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        output = downsample.max_pool_2d(input=conv_out_tanh, ds=self.poolsize, ignore_border=True)
    # this branch must be elif: a bare `if` would let the else clause below
    # overwrite the tanh output
    elif self.non_linear == "relu":
        conv_out_tanh = ReLU(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        output = downsample.max_pool_2d(input=conv_out_tanh, ds=self.poolsize, ignore_border=True)
    else:
        pooled_out = downsample.max_pool_2d(input=conv_out, ds=self.poolsize, ignore_border=True)
        output = pooled_out + self.b.dimshuffle('x', 0, 'x', 'x')
    return output
def get_output(self, train):
    X = self.get_input(train)
    output = downsample.max_pool_2d(X, ds=self.poolsize, st=self.stride,
                                    ignore_border=self.ignore_border,
                                    mode=globals.pooling_mode)
    return output

# class AveragePooling2D(MaxPooling2D):
#     def __init__(self, poolsize=(2, 2), stride=None, ignore_border=True):
#         super(AveragePooling2D, self).__init__()
#         self.input = T.tensor4()
#         self.poolsize = tuple(poolsize)
#         self.stride = stride
#         self.ignore_border = ignore_border
#
#     def get_output(self, train):
#         X = self.get_input(train)
#         sums = images2neibs(X, neib_shape=(globals.s_size, 1)).sum(axis=-1)
#         counts = T.neq(images2neibs(X, neib_shape=(globals.s_size, 1)), 0).sum(axis=-1)
#         average = (sums / counts).reshape((X.shape[0], X.shape[1], 2, 1))
#         return average
def __init__(self, input, params_W, params_b, filter_shape, image_shape, poolsize=(2, 2)):
    assert image_shape[1] == filter_shape[1]
    self.input = input
    self.W = params_W
    self.b = params_b
    # convolve input feature maps with filters
    conv_out = conv.conv2d(
        input=input,
        filters=self.W,
        filter_shape=filter_shape,
        image_shape=image_shape
    )
    # downsample each feature map individually with max-pooling
    pooled_out = downsample.max_pool_2d(
        input=conv_out,
        ds=poolsize,
        ignore_border=True
    )
    self.output = T.tanh(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
    self.params = [self.W, self.b]
def max_pooling(matrix, pool_size):
    """
    Applies max-pooling to the given matrix for the specified pool_size.
    Only the maximum value within each pooling window is kept in the result.
    :param matrix: input matrix
    :param pool_size: pooling cell size
    :return: max-pooled output
    """
    # the original left this body stubbed out with `pass`; the implementation
    # below is the one it carried in a commented-out block
    t_input = tensor.dmatrix('input')
    pool_out = ds.max_pool_2d(t_input, pool_size, ignore_border=True)
    pool_f = theano.function([t_input], pool_out)
    return pool_f(matrix)
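A hypothetical call for illustration, assuming the stubbed-out implementation above is enabled and that `ds` aliases theano.tensor.signal.downsample:

import numpy as np

# pool a 4x4 matrix with non-overlapping 2x2 windows
m = np.arange(16, dtype='float64').reshape(4, 4)
print(max_pooling(m, (2, 2)))
# [[  5.   7.]
#  [ 13.  15.]]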
def encoder(tparams, layer0_input, filter_shape, pool_size, options, prefix='cnn_d'):
    """
    filter_shape: (number of filters, num input feature maps, filter height, filter width)
    image_shape: (batch_size, num input feature maps, image height, image width)
    """
    conv_out = conv.conv2d(input=layer0_input, filters=tparams[_p(prefix, 'W')],
                           filter_shape=filter_shape)
    # conv_out_tanh = tensor.tanh(conv_out + tparams[_p(prefix,'b')].dimshuffle('x', 0, 'x', 'x'))
    # output = downsample.max_pool_2d(input=conv_out_tanh, ds=pool_size, ignore_border=False)
    if options['cnn_activation'] == 'tanh':
        conv_out_tanh = tensor.tanh(conv_out + tparams[_p(prefix, 'b')].dimshuffle('x', 0, 'x', 'x'))
        output = downsample.max_pool_2d(input=conv_out_tanh, ds=pool_size,
                                        ignore_border=False)  # ignore_border is very important
    elif options['cnn_activation'] == 'linear':
        conv_out2 = conv_out + tparams[_p(prefix, 'b')].dimshuffle('x', 0, 'x', 'x')
        output = downsample.max_pool_2d(input=conv_out2, ds=pool_size,
                                        ignore_border=False)  # ignore_border is very important
    else:
        print(' Wrong specification of activation function in CNN')
    return output.flatten(2)
def predict(self, new_data, batch_size):
    """
    predict for new data
    """
    img_shape = None  # (batch_size, 1, self.image_shape[2], self.image_shape[3])
    conv_out = conv.conv2d(input=new_data, filters=self.W,
                           filter_shape=self.filter_shape, image_shape=img_shape)
    if self.non_linear == "tanh":
        conv_out_tanh = T.tanh(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        output = downsample.max_pool_2d(input=conv_out_tanh, ds=self.poolsize, ignore_border=True)
    # elif, not a second if: otherwise the else clause would clobber the tanh output
    elif self.non_linear == "relu":
        conv_out_tanh = ReLU(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        output = downsample.max_pool_2d(input=conv_out_tanh, ds=self.poolsize, ignore_border=True)
    else:
        pooled_out = downsample.max_pool_2d(input=conv_out, ds=self.poolsize, ignore_border=True)
        output = pooled_out + self.b.dimshuffle('x', 0, 'x', 'x')
    return output
def predict_maxpool(self, new_data, batch_size):
    """
    predict for new data
    """
    img_shape = (batch_size, 1, self.image_shape[2], self.image_shape[3])
    conv_out = conv.conv2d(input=new_data, filters=self.W,
                           filter_shape=self.filter_shape, image_shape=img_shape)
    if self.non_linear == "tanh":
        conv_out_tanh = T.tanh(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        output = downsample.max_pool_2d(input=conv_out_tanh, ds=self.poolsize, ignore_border=True)
    elif self.non_linear == "relu":
        # returns the activation before pooling; the original had an unreachable
        # max_pool_2d call after this return
        conv_out_tanh = ReLU(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        return conv_out_tanh
    else:
        pooled_out = downsample.max_pool_2d(input=conv_out, ds=self.poolsize, ignore_border=True)
        output = pooled_out + self.b.dimshuffle('x', 0, 'x', 'x')
    return output
def output(self, x, a):
    # the original referenced undefined names `input` and `maxpool_shape`;
    # pooling the `x` argument with a shape attribute is the likely intent
    return downsample.max_pool_2d(x, self.maxpool_shape, ignore_border=True)
def __init__(self, rng, input, image_shape, filter_shape, poolsize=(2, 2)):
    # the number of input feature maps must match between image and filters
    assert image_shape[1] == filter_shape[1]
    fan_in = np.prod(filter_shape[1:])
    fan_out = filter_shape[0] * np.prod(filter_shape[2:]) / np.prod(poolsize)
    W_bound = np.sqrt(6.0 / (fan_in + fan_out))
    self.W = theano.shared(
        np.asarray(rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
                   dtype=theano.config.floatX),  # @UndefinedVariable
        borrow=True)
    b_values = np.zeros((filter_shape[0],), dtype=theano.config.floatX)  # @UndefinedVariable
    self.b = theano.shared(value=b_values, borrow=True)  # was borrow=T, a typo
    # convolve input feature maps with filters
    conv_out = conv.conv2d(
        input=input,
        filters=self.W,
        filter_shape=filter_shape,
        image_shape=image_shape)
    # max-pooling: downsample each feature map individually
    pooled_out = downsample.max_pool_2d(
        input=conv_out,
        ds=poolsize,
        ignore_border=True)
    # apply the nonlinearity after adding the broadcasted bias
    self.output = T.tanh(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
    self.params = [self.W, self.b]
def set_inpt(self, inpt, inpt_dropout, mini_batch_size):
    self.inpt = inpt.reshape(self.image_shape)
    conv_out = conv.conv2d(
        input=self.inpt, filters=self.w, filter_shape=self.filter_shape,
        image_shape=self.image_shape)
    pooled_out = downsample.max_pool_2d(
        input=conv_out, ds=self.poolsize, ignore_border=True)
    self.output = self.activation_fn(
        pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
    self.output_dropout = self.output  # no dropout in the convolutional layers
def conv_and_pool(input_expr, w, convs_mult, p_drop_conv):
    conv_w = w
    if convs_mult == 2:
        conv_w = T.concatenate([w, w[:, :, ::-1, ::-1]], axis=0)
    elif convs_mult == 4:
        conv_w = T.concatenate([w, w[:, :, ::-1], w[:, :, :, ::-1],
                                w[:, :, ::-1, ::-1]], axis=0)
    e1 = rectify(conv2d(input_expr, conv_w))
    e2 = max_pool_2d(e1, (2, 2), ignore_border=False)
    return dropout(e2, p_drop_conv)
def _decorate_fprop(self, layer):
    layer_fprop = layer.fprop

    def decorated_fprop(instance, input, return_output_preactivation=False):
        if return_output_preactivation:
            output, pre_output = layer_fprop(input, return_output_preactivation)
            pooled_output = downsample.max_pool_2d(output, self.pool_shape,
                                                   ignore_border=self.ignore_border)
            pooled_pre_output = downsample.max_pool_2d(pre_output, self.pool_shape,
                                                       ignore_border=self.ignore_border)
            return pooled_output, pooled_pre_output

        output = layer_fprop(input, return_output_preactivation)
        pooled_output = downsample.max_pool_2d(output, self.pool_shape,
                                               ignore_border=self.ignore_border)
        return pooled_output

    layer.fprop = MethodType(decorated_fprop, layer)
def get_output(self, train):
    X = self.get_input(train)
    # add a trailing unit axis and swap it with the last data axis, so the
    # 2D pooling op effectively pools over a single dimension
    X = theano.tensor.reshape(X, (X.shape[0], X.shape[1], X.shape[2], 1)).dimshuffle(0, 1, 3, 2)
    output = downsample.max_pool_2d(X, ds=self.poolsize, st=self.st,
                                    ignore_border=self.ignore_border)
    output = output.dimshuffle(0, 1, 3, 2)
    return theano.tensor.reshape(output, (output.shape[0], output.shape[1], output.shape[2]))
def get_output(self, train):
    X = self.get_input(train)
    output = downsample.max_pool_2d(X, ds=self.poolsize, st=self.stride,
                                    ignore_border=self.ignore_border)
    return output
def model(X, w, w2, w3, w35, w4, w_o, p_drop_conv, p_drop_hidden):
    # note: w_o is used below but was missing from the original signature
    l1a = rectify(conv2d(X, w, border_mode='full'))
    l1 = max_pool_2d(l1a, (2, 2))
    # l1 = dropout(l1, p_drop_conv)

    l2a = rectify(conv2d(l1, w2))
    l2 = max_pool_2d(l2a, (2, 2))
    # l2 = dropout(l2, p_drop_conv)

    l3 = rectify(conv2d(l2, w3))
    # l3 = max_pool_2d(l3a, (1, 1))
    # l3 = dropout(l3, p_drop_conv)

    l35a = rectify(conv2d(l3, w35))
    l35b = max_pool_2d(l35a, (2, 2))
    l35 = T.flatten(l35b, outdim=2)
    # l35 = dropout(l35, p_drop_conv)

    l4 = rectify(T.dot(l35, w4))
    # l4 = dropout(l4, p_drop_hidden)

    pyx = softmax(T.dot(l4, w_o))
    return l1, l2, l3, l35, l4, pyx
def __init__(self, rng, input, filter_shape, image_shape, poolsize=(2, 2), stride=(1, 1)):
    """
    Allocate a LeNetConvPoolLayer with shared variable internal parameters.
    """
    assert image_shape[1] == filter_shape[1]
    self.input = input

    fan_in = np.prod(filter_shape[1:])
    fan_out = (filter_shape[0] * np.prod(filter_shape[2:]) / np.prod(poolsize))
    W_bound = np.sqrt(6. / (fan_in + fan_out))
    self.W = theano.shared(
        np.asarray(
            rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
            dtype=theano.config.floatX
        ),
        borrow=True
    )
    b_values = np.zeros((filter_shape[0],), dtype=theano.config.floatX)
    self.b = theano.shared(value=b_values, borrow=True)

    conv_out = conv.conv2d(
        input=input,
        filters=self.W,
        filter_shape=filter_shape,
        image_shape=image_shape,
        subsample=stride
    )
    pooled_out = downsample.max_pool_2d(
        input=conv_out,
        ds=poolsize,
        ignore_border=True
    )
    self.output = T.tanh(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
def __init__(self, rng, input, filter_shape, image_shape, poolsize=(2, 2)):
    assert image_shape[1] == filter_shape[1]
    self.input = input

    # there are "num input feature maps * filter height * filter width"
    # inputs to each hidden unit
    fan_in = numpy.prod(filter_shape[1:])
    # each unit in the lower layer receives a gradient from:
    # "num output feature maps * filter height * filter width" / pooling size
    fan_out = (filter_shape[0] * numpy.prod(filter_shape[2:]) /
               numpy.prod(poolsize))
    # initialize weights with random weights
    W_bound = numpy.sqrt(6. / (fan_in + fan_out))
    self.W = theano.shared(numpy.asarray(
        rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
        dtype=theano.config.floatX), borrow=True)

    # the bias is a 1D tensor -- one bias per output feature map
    b_values = numpy.zeros((filter_shape[0],), dtype=theano.config.floatX)
    self.b = theano.shared(value=b_values, borrow=True)

    # convolve input feature maps with filters
    conv_out = conv.conv2d(input=input, filters=self.W,
                           filter_shape=filter_shape, image_shape=image_shape)
    # downsample each feature map individually, using maxpooling
    pooled_out = downsample.max_pool_2d(input=conv_out, ds=poolsize,
                                        ignore_border=True)
    self.output = T.maximum(0.0, pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))

    # store parameters of this layer
    self.params = [self.W, self.b]
def LeNetConvPoolLayer(inps, feature_map, batch, length, window, dim, prefix, params, names):
    fan_in = window * dim
    fan_out = feature_map * window * dim / (length - window + 1)
    filter_shape = (feature_map, 1, window, dim)
    image_shape = (batch, 1, length, dim)
    pool_size = (length - window + 1, 1)

    # if non_linear == "none" or non_linear == "relu":
    #     conv_W = theano.shared(0.2 * numpy.random.uniform(low=-1.0, high=1.0,
    #                            size=filter_shape).astype(theano.config.floatX))
    # else:
    #     W_bound = numpy.sqrt(6. / (fan_in + fan_out))
    #     conv_W = theano.shared(numpy.random.uniform(low=-W_bound, high=W_bound,
    #                            size=filter_shape).astype(theano.config.floatX))
    W_bound = numpy.sqrt(6. / (fan_in + fan_out))
    conv_W = theano.shared(numpy.random.uniform(low=-W_bound, high=W_bound,
                           size=filter_shape).astype(theano.config.floatX))
    conv_b = theano.shared(numpy.zeros(filter_shape[0], dtype=theano.config.floatX))

    # bundle
    params += [conv_W, conv_b]
    names += [prefix + '_conv_W_' + str(window), prefix + '_conv_b_' + str(window)]

    conv_out = conv.conv2d(input=inps, filters=conv_W,
                           filter_shape=filter_shape, image_shape=image_shape)
    conv_out_act = T.tanh(conv_out + conv_b.dimshuffle('x', 0, 'x', 'x'))
    conv_output = downsample.max_pool_2d(input=conv_out_act, ds=pool_size, ignore_border=True)
    return conv_output.flatten(2)
def output_func(self, input):
    # the input is a tensor of shape (batch_size, nwords, ndim)
    return downsample.max_pool_2d(input=input, ds=self.pool_size, ignore_border=True)
def predict(self, new_data, batch_size):
    img_shape = (batch_size, 1, self.image_shape[2], self.image_shape[3])
    conv_out = conv.conv2d(input=new_data, filters=self.W,
                           filter_shape=self.filter_shape, image_shape=img_shape)
    if self.non_linear == "tanh":
        conv_out_tanh = T.tanh(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        output = downsample.max_pool_2d(input=conv_out_tanh, ds=self.poolsize, ignore_border=True)
    # elif, not a second if: otherwise the else clause would clobber the tanh output
    elif self.non_linear == "relu":
        conv_out_tanh = ReLU(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        output = downsample.max_pool_2d(input=conv_out_tanh, ds=self.poolsize, ignore_border=True)
    else:
        pooled_out = downsample.max_pool_2d(input=conv_out, ds=self.poolsize, ignore_border=True)
        output = pooled_out + self.b.dimshuffle('x', 0, 'x', 'x')
    return output
def model(X, w, w2, w3, w4, w_o, p_drop_conv, p_drop_hidden):
    l1a = rectify(conv2d(X, w, border_mode='full'))
    l1 = max_pool_2d(l1a, (2, 2))
    l1 = dropout(l1, p_drop_conv)

    l2a = rectify(conv2d(l1, w2))
    l2 = max_pool_2d(l2a, (2, 2))
    l2 = dropout(l2, p_drop_conv)

    l3a = rectify(conv2d(l2, w3))
    l3b = max_pool_2d(l3a, (2, 2))
    l3 = T.flatten(l3b, outdim=2)
    l3 = dropout(l3, p_drop_conv)

    l4 = rectify(T.dot(l3, w4))
    l4 = dropout(l4, p_drop_hidden)

    pyx = softmax(T.dot(l4, w_o))
    return l1, l2, l3, l4, pyx
def op(self, state):
    X = self.l_in.op(state=state)
    return max_pool_2d(X, self.shape)
def set_input(self, input):
    # convolve input feature maps with filters
    conv_out = conv.conv2d(input=input, filters=self.W,
                           filter_shape=self.filter_shape, image_shape=self.image_shape)
    if self.non_linear == "tanh":
        conv_out_tanh = T.tanh(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        output = downsample.max_pool_2d(input=conv_out_tanh, ds=self.poolsize, ignore_border=True)
    elif self.non_linear == "relu":
        conv_out_tanh = ReLU(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        output = downsample.max_pool_2d(input=conv_out_tanh, ds=self.poolsize, ignore_border=True)
    else:
        pooled_out = downsample.max_pool_2d(input=conv_out, ds=self.poolsize, ignore_border=True)
        output = pooled_out + self.b.dimshuffle('x', 0, 'x', 'x')
    return output
def predict(self, lnew_data, rnew_data):
    """
    predict for new data
    """
    lconv_out = conv.conv2d(input=lnew_data, filters=self.W)
    rconv_out = conv.conv2d(input=rnew_data, filters=self.W)
    lconv_out_tanh = T.tanh(lconv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
    rconv_out_tanh = T.tanh(rconv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
    loutput = downsample.max_pool_2d(input=lconv_out_tanh, ds=self.poolsize,
                                     ignore_border=True, mode="max")
    routput = downsample.max_pool_2d(input=rconv_out_tanh, ds=self.poolsize,
                                     ignore_border=True, mode="max")
    return loutput, routput
def get_output(self, train=False):
    X = self.get_input(train)
    X = T.reshape(X, (X.shape[0], X.shape[1], X.shape[2], 1)).dimshuffle(0, 2, 1, 3)
    output = downsample.max_pool_2d(X, ds=self.pool_size, st=self.st,
                                    ignore_border=self.ignore_border)
    output = output.dimshuffle(0, 2, 1, 3)
    return T.reshape(output, (output.shape[0], output.shape[1], output.shape[2]))
def get_output(self, train=False):
    X = self.get_input(train)
    output = downsample.max_pool_2d(X, ds=self.pool_size, st=self.stride,
                                    ignore_border=self.ignore_border)
    return output
def output_func(self, input):
    return downsample.max_pool_2d(input, ds=self.maxpool_shape,
                                  ignore_border=self.ig_bor, st=self.st)
def get_output(self, train=False):
    # output = K.pool2d(x=train, pool_size=(self.pool_length, 1),
    #                   border_mode=self.border_mode, pool_mode='max')
    pool_size = (self.pool_length, 1)
    strides = (self.pool_length, 1)
    ignore_border = True
    padding = (0, 0)
    output = downsample.max_pool_2d(train, ds=pool_size, st=strides,
                                    ignore_border=ignore_border,
                                    padding=padding, mode='max')
    return output
def step(self, input):
    # self.input = input
    # conv_out = t.conv.conv2d(
    #     input=input,
    #     filters=self.W,
    #     filter_shape=filter_shape,
    #     image_shape=image_shape
    # )
    # convolve input feature maps with filters
    conv_out = conv.conv2d(
        input=input,
        filters=self.W,
        filter_shape=self.filter_shape,
        image_shape=self.image_shape,
        border_mode=self.border_mode
    )
    # downsample each feature map individually, using maxpooling
    pooled_out = downsample.max_pool_2d(
        input=conv_out,
        ds=self.poolsize,
        ignore_border=True,
    )
    # add the bias term. Since the bias is a vector (1D array), we first
    # reshape it to a tensor of shape (1, n_filters, 1, 1). Each bias will
    # thus be broadcasted across mini-batches and feature map
    # width & height
    output = tt.tanh(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
    return output