The following code examples, extracted from open-source Python projects, illustrate how to use mxnet.symbol.
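All of the snippets below assume that MXNet has been imported as mx (import mxnet as mx). As a quick orientation before the extracted examples, here is a minimal sketch, not taken from any of the projects below, of how a symbolic graph is declared and its shapes inspected; the layer names and the input shape are arbitrary:

import mxnet as mx

# Declare a symbolic input and chain a convolution and an activation onto it.
data = mx.symbol.Variable(name="data")
conv = mx.symbol.Convolution(data=data, num_filter=32, kernel=(3, 3),
                             pad=(1, 1), stride=(1, 1), name="conv0")
relu = mx.symbol.Activation(data=conv, act_type="relu", name="relu0")

# Symbols are declarative: no computation runs until the graph is bound.
# infer_shape propagates a concrete input shape through the graph.
arg_shapes, out_shapes, _ = relu.infer_shape(data=(1, 3, 224, 224))
print(relu.list_arguments())   # e.g. ['data', 'conv0_weight', 'conv0_bias']
print(out_shapes)              # [(1, 32, 224, 224)]

Because symbols are declarative, the wrappers in the examples below simply compose operators and hand the resulting symbol back to the caller.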
def conv_act_layer(from_layer, name, num_filter, kernel=(1,1), pad=(0,0), \
    stride=(1,1), act_type="relu", use_batchnorm=False):
    """
    wrapper for a small Convolution group

    Parameters:
    ----------
    from_layer : mx.symbol
        continue on which layer
    name : str
        base name of the new layers
    num_filter : int
        how many filters to use in Convolution layer
    kernel : tuple (int, int)
        kernel size (h, w)
    pad : tuple (int, int)
        padding size (h, w)
    stride : tuple (int, int)
        stride size (h, w)
    act_type : str
        activation type, can be relu...
    use_batchnorm : bool
        whether to use batch normalization

    Returns:
    ----------
    (conv, relu) mx.Symbols
    """
    assert not use_batchnorm, "batchnorm not yet supported"
    bias = mx.symbol.Variable(name="conv{}_bias".format(name),
        init=mx.init.Constant(0.0), attr={'__lr_mult__': '2.0'})
    conv = mx.symbol.Convolution(data=from_layer, bias=bias, kernel=kernel, pad=pad, \
        stride=stride, num_filter=num_filter, name="conv{}".format(name))
    relu = mx.symbol.Activation(data=conv, act_type=act_type, \
        name="{}{}".format(act_type, name))
    if use_batchnorm:
        relu = mx.symbol.BatchNorm(data=relu, name="bn{}".format(name))
    return conv, relu
def conv_act_layer(from_layer, name, num_filter, kernel=(1,1), pad=(0,0), \
    stride=(1,1), act_type="relu", use_batchnorm=False):
    """
    wrapper for a small Convolution group

    Parameters:
    ----------
    from_layer : mx.symbol
        continue on which layer
    name : str
        base name of the new layers
    num_filter : int
        how many filters to use in Convolution layer
    kernel : tuple (int, int)
        kernel size (h, w)
    pad : tuple (int, int)
        padding size (h, w)
    stride : tuple (int, int)
        stride size (h, w)
    act_type : str
        activation type, can be relu...
    use_batchnorm : bool
        whether to use batch normalization

    Returns:
    ----------
    relu : mx.Symbol
    """
    bias = mx.symbol.Variable(name="{}_conv_bias".format(name),
        init=mx.init.Constant(0.0), attr={'__lr_mult__': '2.0'})
    conv = mx.symbol.Convolution(data=from_layer, kernel=kernel, pad=pad, \
        stride=stride, num_filter=num_filter, name="{}_conv".format(name), bias=bias)
    if use_batchnorm:
        conv = mx.symbol.BatchNorm(data=conv, name="{}_bn".format(name))
    relu = mx.symbol.Activation(data=conv, act_type=act_type, \
        name="{}_{}".format(name, act_type))
    return relu
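For illustration, a hypothetical call to the variant above (the one returning only the activation symbol); the data variable and the name "stage1" are placeholders, not part of the original project:

import mxnet as mx

# Hypothetical usage of conv_act_layer as defined above:
# a 1x1 convolution with a learnable bias, followed by a ReLU.
data = mx.symbol.Variable(name="data")
feat = conv_act_layer(data, name="stage1", num_filter=64,
                      kernel=(1, 1), pad=(0, 0), stride=(1, 1),
                      act_type="relu", use_batchnorm=False)
print(feat.name)  # "stage1_relu"

With these arguments the wrapper creates symbols named stage1_conv and stage1_relu, plus stage1_bn when use_batchnorm=True.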
def legacy_conv_act_layer(from_layer, name, num_filter, kernel=(1,1), pad=(0,0), \
    stride=(1,1), act_type="relu", use_batchnorm=False):
    """
    wrapper for a small Convolution group

    Parameters:
    ----------
    from_layer : mx.symbol
        continue on which layer
    name : str
        base name of the new layers
    num_filter : int
        how many filters to use in Convolution layer
    kernel : tuple (int, int)
        kernel size (h, w)
    pad : tuple (int, int)
        padding size (h, w)
    stride : tuple (int, int)
        stride size (h, w)
    act_type : str
        activation type, can be relu...
    use_batchnorm : bool
        whether to use batch normalization

    Returns:
    ----------
    (conv, relu) mx.Symbols
    """
    assert not use_batchnorm, "batchnorm not yet supported"
    bias = mx.symbol.Variable(name="conv{}_bias".format(name),
        init=mx.init.Constant(0.0), attr={'__lr_mult__': '2.0'})
    conv = mx.symbol.Convolution(data=from_layer, bias=bias, kernel=kernel, pad=pad, \
        stride=stride, num_filter=num_filter, name="conv{}".format(name))
    relu = mx.symbol.Activation(data=conv, act_type=act_type, \
        name="{}{}".format(act_type, name))
    if use_batchnorm:
        relu = mx.symbol.BatchNorm(data=relu, name="bn{}".format(name))
    return conv, relu
def conv_act_layer(from_layer, name, num_filter, kernel=(1,1), pad=(0,0), \
    stride=(1,1), act_type="relu", use_batchnorm=False):
    """
    wrapper for a small Convolution group

    Parameters:
    ----------
    from_layer : mx.symbol
        continue on which layer
    name : str
        base name of the new layers
    num_filter : int
        how many filters to use in Convolution layer
    kernel : tuple (int, int)
        kernel size (h, w)
    pad : tuple (int, int)
        padding size (h, w)
    stride : tuple (int, int)
        stride size (h, w)
    act_type : str
        activation type, can be relu...
    use_batchnorm : bool
        whether to use batch normalization

    Returns:
    ----------
    (conv, relu) mx.Symbols
    """
    assert not use_batchnorm, "batchnorm not yet supported"
    conv = mx.symbol.Convolution(data=from_layer, kernel=kernel, pad=pad, \
        stride=stride, num_filter=num_filter, name="conv{}".format(name))
    relu = mx.symbol.Activation(data=conv, act_type=act_type, \
        name="{}{}".format(act_type, name))
    if use_batchnorm:
        relu = mx.symbol.BatchNorm(data=relu, name="bn{}".format(name))
    return conv, relu
def conv_act_layer(from_layer, name, num_filter, kernel=(1,1), pad=(0,0), \
    stride=(1,1), act_type="relu", use_batchnorm=False):
    """
    wrapper for a small Convolution group

    Parameters:
    ----------
    from_layer : mx.symbol
        continue on which layer
    name : str
        base name of the new layers
    num_filter : int
        how many filters to use in Convolution layer
    kernel : tuple (int, int)
        kernel size (h, w)
    pad : tuple (int, int)
        padding size (h, w)
    stride : tuple (int, int)
        stride size (h, w)
    act_type : str
        activation type, can be relu...
    use_batchnorm : bool
        whether to use batch normalization

    Returns:
    ----------
    relu : mx.Symbol
    """
    conv = mx.symbol.Convolution(data=from_layer, kernel=kernel, pad=pad, \
        stride=stride, num_filter=num_filter, name="{}_conv".format(name))
    if use_batchnorm:
        conv = mx.symbol.BatchNorm(data=conv, name="{}_bn".format(name))
    relu = mx.symbol.Activation(data=conv, act_type=act_type, \
        name="{}_{}".format(name, act_type))
    return relu
def conv_act_layer(from_layer, name, num_filter, kernel=(1,1), pad=(0,0), \
    stride=(1,1), act_type="relu", use_batchnorm=False):
    """
    wrapper for a small Convolution group

    Parameters:
    ----------
    from_layer : mx.symbol
        continue on which layer
    name : str
        base name of the new layers
    num_filter : int
        how many filters to use in Convolution layer
    kernel : tuple (int, int)
        kernel size (h, w)
    pad : tuple (int, int)
        padding size (h, w)
    stride : tuple (int, int)
        stride size (h, w)
    act_type : str
        activation type, can be relu...
    use_batchnorm : bool
        whether to use batch normalization

    Returns:
    ----------
    relu : mx.Symbol
    """
    conv = mx.symbol.Convolution(data=from_layer, kernel=kernel, pad=pad, \
        stride=stride, num_filter=num_filter, name="conv{}".format(name))
    if use_batchnorm:
        conv = mx.symbol.BatchNorm(data=conv, name="bn{}".format(name))
    relu = mx.symbol.Activation(data=conv, act_type=act_type, \
        name="{}{}".format(act_type, name))
    return relu
def multi_layer_feature(body, from_layers, num_filters, strides, pads, min_filter=128):
    """Wrapper function to extract features from base network, attaching extra
    layers and SSD specific layers

    Parameters
    ----------
    from_layers : list of str
        feature extraction layers, use '' to attach extra layers
        For example:
        from_layers = ['relu4_3', 'fc7', '', '', '', '']
        which means extract feature from relu4_3 and fc7, adding 4 extra layers
        on top of fc7
    num_filters : list of int
        number of filters for extra layers, you can use -1 for extracted features,
        however, if normalization and scale is applied, the number of filters for
        that layer must be provided.
        For example:
        num_filters = [512, -1, 512, 256, 256, 256]
    strides : list of int
        strides for the appended 3x3 convolutions, -1 can be used for extracted
        feature layers
    pads : list of int
        paddings for the appended 3x3 convolutions, -1 can be used for extracted
        layers
    min_filter : int
        minimum number of filters used in the 1x1 convolution

    Returns
    -------
    list of mx.Symbols
    """
    # arguments check
    assert len(from_layers) > 0
    assert isinstance(from_layers[0], str) and len(from_layers[0].strip()) > 0
    assert len(from_layers) == len(num_filters) == len(strides) == len(pads)

    internals = body.get_internals()
    layers = []
    for k, params in enumerate(zip(from_layers, num_filters, strides, pads)):
        from_layer, num_filter, s, p = params
        if from_layer.strip():
            # extract from base network
            layer = internals[from_layer.strip() + '_output']
            layers.append(layer)
        else:
            # attach from last feature layer
            assert len(layers) > 0
            assert num_filter > 0
            layer = layers[-1]
            num_1x1 = max(min_filter, num_filter // 2)
            conv_1x1 = conv_act_layer(layer, 'multi_feat_%d_conv_1x1' % (k),
                num_1x1, kernel=(1, 1), pad=(0, 0), stride=(1, 1), act_type='relu')
            conv_3x3 = conv_act_layer(conv_1x1, 'multi_feat_%d_conv_3x3' % (k),
                num_filter, kernel=(3, 3), pad=(p, p), stride=(s, s), act_type='relu')
            layers.append(conv_3x3)
    return layers
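Following the from_layers example given in the docstring, a hedged sketch of how multi_layer_feature might be invoked. The base network here is a tiny stand-in whose internals expose relu4_3 and fc7 outputs (in practice this would be something like a reduced VGG16), the strides/pads values are illustrative, and conv_act_layer is assumed to be one of the single-return variants above:

import mxnet as mx

# Tiny stand-in for a base network containing layers named 'relu4_3' and 'fc7'.
body = mx.symbol.Variable('data')
body = mx.symbol.Convolution(data=body, num_filter=512, kernel=(3, 3),
                             pad=(1, 1), name='conv4_3')
body = mx.symbol.Activation(data=body, act_type='relu', name='relu4_3')
body = mx.symbol.Convolution(data=body, num_filter=1024, kernel=(1, 1), name='fc7')

# Two feature maps taken from the base network, four extra layers stacked on fc7.
from_layers = ['relu4_3', 'fc7', '', '', '', '']
num_filters = [512, -1, 512, 256, 256, 256]
strides = [-1, -1, 2, 2, 2, 2]   # -1 for extracted layers (illustrative values)
pads = [-1, -1, 1, 1, 1, 1]

feature_layers = multi_layer_feature(body, from_layers, num_filters,
                                     strides, pads, min_filter=128)
print(len(feature_layers))  # 6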
def conv_act_layer(from_layer, name, num_filter, kernel=(3, 3), pad=(1, 1), \
    stride=(1,1), act_type="relu", use_batchnorm=True):
    """
    wrapper for a small Convolution group

    Parameters:
    ----------
    from_layer : mx.symbol
        continue on which layer
    name : str
        base name of the new layers
    num_filter : int
        how many filters to use in Convolution layer
    kernel : tuple (int, int)
        kernel size (h, w)
    pad : tuple (int, int)
        padding size (h, w)
    stride : tuple (int, int)
        stride size (h, w)
    act_type : str
        activation type, can be relu...
    use_batchnorm : bool
        whether to use batch normalization

    Returns:
    ----------
    relu : mx.Symbol
    """
    conv = mx.symbol.Convolution(data=from_layer, kernel=kernel, pad=pad, \
        stride=stride, num_filter=num_filter, name="{}".format(name))
    if use_batchnorm:
        conv = mx.symbol.BatchNorm(data=conv, name="bn_{}".format(name))
    if act_type in ['elu', 'leaky', 'prelu', 'rrelu']:
        relu = mx.symbol.LeakyReLU(data=conv, act_type=act_type,
            name="{}_{}".format(act_type, name), slope=0.1)
    elif act_type in ['relu', 'sigmoid', 'softrelu', 'tanh']:
        relu = mx.symbol.Activation(data=conv, act_type=act_type, \
            name="{}_{}".format(act_type, name))
    else:
        assert isinstance(act_type, str)
        raise ValueError("Invalid activation type: " + str(act_type))
    return relu
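This last variant dispatches on act_type: the leaky-family names are routed through mx.symbol.LeakyReLU (with the slope fixed at 0.1 inside the wrapper), while the standard names go through mx.symbol.Activation. A small hypothetical call, assuming a placeholder input symbol and layer name:

import mxnet as mx

# Hypothetical usage: act_type='leaky' takes the LeakyReLU branch,
# with batch normalization enabled by default in this variant.
data = mx.symbol.Variable(name="data")
out = conv_act_layer(data, name="conv1_1", num_filter=32,
                     kernel=(3, 3), pad=(1, 1), stride=(1, 1),
                     act_type="leaky", use_batchnorm=True)
print(out.name)  # "leaky_conv1_1"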