The following 50 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.contrib.slim.arg_scope().
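Before the extracted examples, here is a minimal sketch of the basic pattern (it assumes TensorFlow 1.x, where tf.contrib.slim is available; toy_net and its layer sizes are illustrative only and do not come from the projects below): slim.arg_scope installs default keyword arguments for the ops listed in its first argument, and an argument passed explicitly at a call site still overrides the scoped default.

import tensorflow as tf
import tensorflow.contrib.slim as slim

def toy_net(images):
    # Defaults apply to every slim.conv2d / slim.fully_connected call inside the scope.
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        activation_fn=tf.nn.relu,
                        weights_regularizer=slim.l2_regularizer(5e-4)):
        net = slim.conv2d(images, 32, [3, 3], scope='conv1')   # inherits relu + L2 defaults
        net = slim.max_pool2d(net, [2, 2], scope='pool1')      # max_pool2d is not in the scope list
        net = slim.flatten(net)
        # An explicit argument overrides the scoped default (no activation on the logits).
        logits = slim.fully_connected(net, 10, activation_fn=None, scope='logits')
    return logits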
def create_architecture(self, mode, tag=None):
    training = mode == 'TRAIN'
    testing = mode == 'TEST'
    assert tag != None

    # handle most of the regularizers here
    weights_regularizer = tf.contrib.layers.l2_regularizer(cfg.TRAIN.WEIGHT_DECAY)
    biases_regularizer = weights_regularizer

    # list as many types of layers as possible, even if they are not used now
    with arg_scope([slim.conv2d, slim.conv2d_in_plane,
                    slim.conv2d_transpose, slim.separable_conv2d, slim.fully_connected],
                   weights_regularizer=weights_regularizer,
                   biases_regularizer=biases_regularizer,
                   biases_initializer=tf.constant_initializer(0.0)):
        self.build_network()

    elbo = self.add_losses()
    self._summary_op = tf.summary.merge_all()
    return elbo
def fc_net(inp, layers, out_layers, scope, lamba=1e-3, activation=tf.nn.relu, reuse=None,
           weights_initializer=initializers.xavier_initializer(uniform=False)):
    with slim.arg_scope([slim.fully_connected],
                        activation_fn=activation,
                        normalizer_fn=None,
                        weights_initializer=weights_initializer,
                        reuse=reuse,
                        weights_regularizer=slim.l2_regularizer(lamba)):

        if layers:
            h = slim.stack(inp, slim.fully_connected, layers, scope=scope)
            if not out_layers:
                return h
        else:
            h = inp
        outputs = []
        for i, (outdim, activation) in enumerate(out_layers):
            o1 = slim.fully_connected(h, outdim, activation_fn=activation,
                                      scope=scope + '_{}'.format(i + 1))
            outputs.append(o1)
        return outputs if len(outputs) > 1 else outputs[0]
def separable_conv(self, input, k_h, k_w, c_o, stride, name, relu=True):
    with slim.arg_scope([slim.batch_norm], fused=common.batchnorm_fused):
        output = slim.separable_convolution2d(input,
                                              num_outputs=None,
                                              stride=stride,
                                              trainable=self.trainable,
                                              depth_multiplier=1.0,
                                              kernel_size=[k_h, k_w],
                                              activation_fn=None,
                                              weights_initializer=tf.contrib.layers.xavier_initializer(),
                                              # weights_initializer=tf.truncated_normal_initializer(stddev=0.09),
                                              weights_regularizer=tf.contrib.layers.l2_regularizer(0.00004),
                                              biases_initializer=None,
                                              padding=DEFAULT_PADDING,
                                              scope=name + '_depthwise')

        output = slim.convolution2d(output,
                                    c_o,
                                    stride=1,
                                    kernel_size=[1, 1],
                                    activation_fn=tf.nn.relu if relu else None,
                                    weights_initializer=tf.contrib.layers.xavier_initializer(),
                                    # weights_initializer=tf.truncated_normal_initializer(stddev=0.09),
                                    biases_initializer=slim.init_ops.zeros_initializer(),
                                    normalizer_fn=slim.batch_norm,
                                    trainable=self.trainable,
                                    weights_regularizer=tf.contrib.layers.l2_regularizer(common.regularizer_dsconv),
                                    # weights_regularizer=None,
                                    scope=name + '_pointwise')

    return output
def resnet_arg_scope(is_training=True,
                     batch_norm_decay=0.997,
                     batch_norm_epsilon=1e-5,
                     batch_norm_scale=True):
    batch_norm_params = {
        'is_training': False,
        'decay': batch_norm_decay,
        'epsilon': batch_norm_epsilon,
        'scale': batch_norm_scale,
        'trainable': False,
        'updates_collections': tf.GraphKeys.UPDATE_OPS
    }
    with arg_scope(
            [slim.conv2d],
            weights_regularizer=slim.l2_regularizer(cfg.TRAIN.WEIGHT_DECAY),
            weights_initializer=slim.variance_scaling_initializer(),
            trainable=is_training,
            activation_fn=tf.nn.relu,
            normalizer_fn=slim.batch_norm,
            normalizer_params=batch_norm_params):
        with arg_scope([slim.batch_norm], **batch_norm_params) as arg_sc:
            return arg_sc
def _image_to_head(self, is_training, reuse=False):
    # Base bottleneck
    assert (0 <= cfg.MOBILENET.FIXED_LAYERS <= 12)
    net_conv = self._image
    if cfg.MOBILENET.FIXED_LAYERS > 0:
        with slim.arg_scope(mobilenet_v1_arg_scope(is_training=False)):
            net_conv = mobilenet_v1_base(net_conv,
                                         _CONV_DEFS[:cfg.MOBILENET.FIXED_LAYERS],
                                         starting_layer=0,
                                         depth_multiplier=self._depth_multiplier,
                                         reuse=reuse,
                                         scope=self._scope)
    if cfg.MOBILENET.FIXED_LAYERS < 12:
        with slim.arg_scope(mobilenet_v1_arg_scope(is_training=is_training)):
            net_conv = mobilenet_v1_base(net_conv,
                                         _CONV_DEFS[cfg.MOBILENET.FIXED_LAYERS:12],
                                         starting_layer=cfg.MOBILENET.FIXED_LAYERS,
                                         depth_multiplier=self._depth_multiplier,
                                         reuse=reuse,
                                         scope=self._scope)

    self._act_summaries.append(net_conv)
    self._layers['head'] = net_conv

    return net_conv
def _middle_conv(self, stage):
    with tf.variable_scope('stage_' + str(stage)):
        self.current_featuremap = tf.concat([self.stage_heatmap[stage - 2],
                                             self.sub_stage_img_feature,
                                             self.center_map],
                                            axis=3)
        with slim.arg_scope([slim.conv2d],
                            padding='SAME',
                            activation_fn=tf.nn.relu,
                            weights_initializer=tf.contrib.layers.xavier_initializer()):
            mid_net = slim.conv2d(self.current_featuremap, 128, [7, 7], scope='mid_conv1')
            mid_net = slim.conv2d(mid_net, 128, [7, 7], scope='mid_conv2')
            mid_net = slim.conv2d(mid_net, 128, [7, 7], scope='mid_conv3')
            mid_net = slim.conv2d(mid_net, 128, [7, 7], scope='mid_conv4')
            mid_net = slim.conv2d(mid_net, 128, [7, 7], scope='mid_conv5')
            mid_net = slim.conv2d(mid_net, 128, [1, 1], scope='mid_conv6')
            self.current_heatmap = slim.conv2d(mid_net, self.joints, [1, 1], scope='mid_conv7')
            self.stage_heatmap.append(self.current_heatmap)
def _middle_conv(self, stage):
    with tf.variable_scope('stage_' + str(stage)):
        self.current_featuremap = tf.concat([self.stage_heatmap[stage - 2],
                                             self.sub_stage_img_feature,
                                             # self.center_map,
                                             ],
                                            axis=3)
        with slim.arg_scope([slim.conv2d],
                            padding='SAME',
                            activation_fn=tf.nn.relu,
                            weights_initializer=tf.contrib.layers.xavier_initializer()):
            mid_net = slim.conv2d(self.current_featuremap, 128, [7, 7], scope='mid_conv1')
            mid_net = slim.conv2d(mid_net, 128, [7, 7], scope='mid_conv2')
            mid_net = slim.conv2d(mid_net, 128, [7, 7], scope='mid_conv3')
            mid_net = slim.conv2d(mid_net, 128, [7, 7], scope='mid_conv4')
            mid_net = slim.conv2d(mid_net, 128, [7, 7], scope='mid_conv5')
            mid_net = slim.conv2d(mid_net, 128, [1, 1], scope='mid_conv6')
            self.current_heatmap = slim.conv2d(mid_net, self.joints, [1, 1], scope='mid_conv7')
            self.stage_heatmap.append(self.current_heatmap)
def arg_scope(self):
    """Configure the neural network's layers."""
    batch_norm_params = {
        "is_training": self.is_training,
        "decay": 0.9997,
        "epsilon": 0.001,
        "variables_collections": {
            "beta": None,
            "gamma": None,
            "moving_mean": ["moving_vars"],
            "moving_variance": ["moving_vars"]
        }
    }

    with slim.arg_scope(
            [slim.conv2d, slim.fully_connected],
            weights_initializer=tf.truncated_normal_initializer(
                stddev=self._hparams.init_stddev),
            weights_regularizer=slim.l2_regularizer(
                self._hparams.regularize_constant),
            activation_fn=tf.nn.relu,
            normalizer_fn=slim.batch_norm,
            normalizer_params=batch_norm_params) as sc:
        return sc
def adversarial_discriminator(net, layers, scope='adversary', leaky=False):
    if leaky:
        activation_fn = tflearn.activations.leaky_relu
    else:
        activation_fn = tf.nn.relu
    with ExitStack() as stack:
        stack.enter_context(tf.variable_scope(scope))
        stack.enter_context(
            slim.arg_scope(
                [slim.fully_connected],
                activation_fn=activation_fn,
                weights_regularizer=slim.l2_regularizer(2.5e-5)))
        for dim in layers:
            net = slim.fully_connected(net, dim)
        net = slim.fully_connected(net, 2, activation_fn=None)
    return net
def vgg_arg_scope(weight_decay=0.0005):
    """Defines the VGG arg scope.

    Args:
      weight_decay: The l2 regularization coefficient.

    Returns:
      An arg_scope.
    """
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        activation_fn=tf.nn.relu,
                        weights_regularizer=slim.l2_regularizer(weight_decay),
                        biases_initializer=tf.zeros_initializer()):
        with slim.arg_scope([slim.conv2d], padding='SAME') as arg_sc:
            with slim.arg_scope([slim.max_pool2d], padding='SAME') as arg_sc:
                return arg_sc
def _extra_conv_arg_scope_with_bn(weight_decay=0.00001,
                                  activation_fn=None,
                                  batch_norm_decay=0.997,
                                  batch_norm_epsilon=1e-5,
                                  batch_norm_scale=True):
    batch_norm_params = {
        'decay': batch_norm_decay,
        'epsilon': batch_norm_epsilon,
        'scale': batch_norm_scale,
        'updates_collections': tf.GraphKeys.UPDATE_OPS,
    }

    with slim.arg_scope(
            [slim.conv2d],
            weights_regularizer=slim.l2_regularizer(weight_decay),
            weights_initializer=slim.variance_scaling_initializer(),
            activation_fn=tf.nn.relu,
            normalizer_fn=slim.batch_norm,
            normalizer_params=batch_norm_params):
        with slim.arg_scope([slim.batch_norm], **batch_norm_params):
            with slim.arg_scope([slim.max_pool2d], padding='SAME') as arg_sc:
                return arg_sc
def _extra_conv_arg_scope(weight_decay=0.00001, activation_fn=None, normalizer_fn=None):
    with slim.arg_scope(
            [slim.conv2d, slim.conv2d_transpose],
            padding='SAME',
            weights_regularizer=slim.l2_regularizer(weight_decay),
            weights_initializer=tf.truncated_normal_initializer(stddev=0.001),
            activation_fn=activation_fn,
            normalizer_fn=normalizer_fn) as arg_sc:
        with slim.arg_scope(
                [slim.fully_connected],
                weights_regularizer=slim.l2_regularizer(weight_decay),
                weights_initializer=tf.truncated_normal_initializer(stddev=0.001),
                activation_fn=activation_fn,
                normalizer_fn=normalizer_fn) as arg_sc:
            return arg_sc
def model(self, input_text_begin, input_text_end, gene, variation, expected_labels, batch_size,
          vocabulary_size=VOCABULARY_SIZE, embeddings_size=EMBEDDINGS_SIZE, output_classes=9):
    # embeddings
    embeddings = _load_embeddings(vocabulary_size, embeddings_size)
    # model
    with slim.arg_scope(self.text_classification_model.model_arg_scope()):
        outputs = self.text_classification_model.model(input_text_begin, input_text_end,
                                                       gene, variation, output_classes,
                                                       embeddings=embeddings,
                                                       batch_size=batch_size,
                                                       training=False)
    # loss
    targets = self.text_classification_model.targets(expected_labels, output_classes)
    loss = self.text_classification_model.loss(targets, outputs)
    self.accumulated_loss = tf.Variable(0.0, dtype=tf.float32, name='accumulated_loss',
                                        trainable=False)
    self.accumulated_loss = tf.assign_add(self.accumulated_loss, loss)
    step = tf.Variable(0, dtype=tf.int32, name='eval_step', trainable=False)
    step_increase = tf.assign_add(step, 1)
    self.loss = self.accumulated_loss / tf.cast(step_increase, dtype=tf.float32)
    tf.summary.scalar('loss', self.loss)
    # metrics
    self.metrics = metrics.single_label(outputs['prediction'], targets, moving_average=False)
    return None
def model(self, input_text_begin, input_text_end, gene, variation, batch_size,
          vocabulary_size=VOCABULARY_SIZE, embeddings_size=EMBEDDINGS_SIZE, output_classes=9):
    # embeddings
    embeddings = _load_embeddings(vocabulary_size, embeddings_size)
    # global step
    self.global_step = training_util.get_or_create_global_step()
    self.global_step = tf.assign_add(self.global_step, 1)
    # model
    with tf.control_dependencies([self.global_step]):
        with slim.arg_scope(self.text_classification_model.model_arg_scope()):
            self.outputs = self.text_classification_model.model(input_text_begin, input_text_end,
                                                                gene, variation, output_classes,
                                                                embeddings=embeddings,
                                                                batch_size=batch_size,
                                                                training=False)
    # restore only the trainable variables
    self.saver = tf.train.Saver(var_list=tf_variables.trainable_variables())
    return self.outputs
def squeezenet(inputs,
               num_classes=1000,
               is_training=True,
               keep_prob=0.5,
               spatial_squeeze=True,
               scope='squeeze'):
    """squeezenet v1.1"""
    with tf.name_scope(scope, 'squeeze', [inputs]) as sc:
        end_points_collection = sc + '_end_points'
        # Collect outputs for conv2d, fully_connected and max_pool2d.
        with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d, fire_module],
                            outputs_collections=end_points_collection):
            nets = squeezenet_inference(inputs, is_training, keep_prob)
            nets = slim.conv2d(nets, num_classes, [1, 1],
                               activation_fn=None,
                               normalizer_fn=None,
                               scope='logits')
            end_points = slim.utils.convert_collection_to_dict(end_points_collection)
            if spatial_squeeze:
                nets = tf.squeeze(nets, [1, 2], name='logits/squeezed')
    return nets, end_points
def build_model(self):
    with slim.arg_scope([slim.conv2d, slim.conv2d_transpose], activation_fn=tf.nn.elu):
        with tf.variable_scope('model', reuse=self.reuse_variables):

            self.left_pyramid = self.scale_pyramid(self.left, 4)
            if self.mode == 'train':
                self.right_pyramid = self.scale_pyramid(self.right, 4)

            if self.params.do_stereo:
                self.model_input = tf.concat([self.left, self.right], 3)
            else:
                self.model_input = self.left

            # build model
            if self.params.encoder == 'vgg':
                self.build_vgg()
            elif self.params.encoder == 'resnet50':
                self.build_resnet50()
            else:
                return None
def gcn_block(inputs, num_class, kernel_size, scope=None):
    with tf.variable_scope(scope, 'gcn_block', [inputs]):
        with slim.arg_scope([slim.conv2d],
                            padding='SAME',
                            activation_fn=None,
                            normalizer_fn=None,
                            normalizer_params=None,
                            weights_initializer=tf.contrib.layers.xavier_initializer(),
                            weights_regularizer=tf.contrib.layers.l2_regularizer(0.0001),
                            biases_initializer=tf.zeros_initializer(),
                            biases_regularizer=tf.contrib.layers.l2_regularizer(0.0002)):
            left_conv1 = slim.conv2d(inputs, num_class, [kernel_size, 1])
            left_conv2 = slim.conv2d(left_conv1, num_class, [1, kernel_size])
            right_conv1 = slim.conv2d(inputs, num_class, [1, kernel_size])
            right_conv2 = slim.conv2d(right_conv1, num_class, [kernel_size, 1])
            result_sum = tf.add(left_conv2, right_conv2, name='gcn_module')
            return result_sum
def gcn_br(inputs, scope):
    with tf.variable_scope(scope, 'gcn_br', [inputs]):
        with slim.arg_scope([slim.conv2d],
                            padding='SAME',
                            activation_fn=tf.nn.relu,
                            normalizer_fn=None,
                            normalizer_params=None,
                            weights_initializer=tf.contrib.layers.xavier_initializer(),
                            weights_regularizer=tf.contrib.layers.l2_regularizer(0.0001),
                            biases_initializer=tf.zeros_initializer(),
                            biases_regularizer=tf.contrib.layers.l2_regularizer(0.0002)):
            num_class = inputs.get_shape()[3]
            conv = slim.conv2d(inputs, num_class, [3, 3])
            conv = slim.conv2d(conv, num_class, [3, 3], activation_fn=None)
            result_sum = tf.add(inputs, conv, name='fcn_br')
            return result_sum
def inference(images, keep_probability, phase_train=True,
              bottleneck_layer_size=128, weight_decay=0.0, reuse=None):
    batch_norm_params = {
        # Decay for the moving averages.
        'decay': 0.995,
        # epsilon to prevent 0s in variance.
        'epsilon': 0.001,
        # force in-place updates of mean and variance estimates
        'updates_collections': None,
        # Moving averages end up in the trainable variables collection
        'variables_collections': [tf.GraphKeys.TRAINABLE_VARIABLES],
    }

    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                        weights_regularizer=slim.l2_regularizer(weight_decay),
                        normalizer_fn=slim.batch_norm,
                        normalizer_params=batch_norm_params):
        return inception_resnet_v2(images, is_training=phase_train,
                                   dropout_keep_prob=keep_probability,
                                   bottleneck_layer_size=bottleneck_layer_size,
                                   reuse=reuse)
def inference(images, keep_probability, phase_train=True,
              bottleneck_layer_size=128, weight_decay=0.0, reuse=None):
    batch_norm_params = {
        # Decay for the moving averages.
        'decay': 0.995,
        # epsilon to prevent 0s in variance.
        'epsilon': 0.001,
        # force in-place updates of mean and variance estimates
        'updates_collections': None,
        # Moving averages end up in the trainable variables collection
        'variables_collections': [tf.GraphKeys.TRAINABLE_VARIABLES],
    }

    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                        weights_regularizer=slim.l2_regularizer(weight_decay),
                        normalizer_fn=slim.batch_norm,
                        normalizer_params=batch_norm_params):
        return inception_resnet_v1(images, is_training=phase_train,
                                   dropout_keep_prob=keep_probability,
                                   bottleneck_layer_size=bottleneck_layer_size,
                                   reuse=reuse)
def encoder(self, images, is_training):
    activation_fn = leaky_relu  # tf.nn.relu
    weight_decay = 0.0
    with tf.variable_scope('encoder'):
        with slim.arg_scope([slim.batch_norm], is_training=is_training):
            with slim.arg_scope([slim.conv2d, slim.fully_connected],
                                weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                                weights_regularizer=slim.l2_regularizer(weight_decay),
                                normalizer_fn=slim.batch_norm,
                                normalizer_params=self.batch_norm_params):
                net = images

                net = slim.conv2d(net, 32, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_1a')
                net = slim.repeat(net, 3, conv2d_block, 0.1, 32, [4, 4], 1,
                                  activation_fn=activation_fn, scope='Conv2d_1b')

                net = slim.conv2d(net, 64, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_2a')
                net = slim.repeat(net, 3, conv2d_block, 0.1, 64, [4, 4], 1,
                                  activation_fn=activation_fn, scope='Conv2d_2b')

                net = slim.conv2d(net, 128, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_3a')
                net = slim.repeat(net, 3, conv2d_block, 0.1, 128, [4, 4], 1,
                                  activation_fn=activation_fn, scope='Conv2d_3b')

                net = slim.conv2d(net, 256, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_4a')
                net = slim.repeat(net, 3, conv2d_block, 0.1, 256, [4, 4], 1,
                                  activation_fn=activation_fn, scope='Conv2d_4b')

                net = slim.flatten(net)
                fc1 = slim.fully_connected(net, self.latent_variable_dim,
                                           activation_fn=None, normalizer_fn=None, scope='Fc_1')
                fc2 = slim.fully_connected(net, self.latent_variable_dim,
                                           activation_fn=None, normalizer_fn=None, scope='Fc_2')
    return fc1, fc2
def encoder(self, images, is_training):
    activation_fn = leaky_relu  # tf.nn.relu
    weight_decay = 0.0
    with tf.variable_scope('encoder'):
        with slim.arg_scope([slim.batch_norm], is_training=is_training):
            with slim.arg_scope([slim.conv2d, slim.fully_connected],
                                weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                                weights_regularizer=slim.l2_regularizer(weight_decay),
                                normalizer_fn=slim.batch_norm,
                                normalizer_params=self.batch_norm_params):
                net = slim.conv2d(images, 32, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_1')
                net = slim.conv2d(net, 64, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_2')
                net = slim.conv2d(net, 128, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_3')
                net = slim.conv2d(net, 256, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_4')
                net = slim.flatten(net)
                fc1 = slim.fully_connected(net, self.latent_variable_dim,
                                           activation_fn=None, normalizer_fn=None, scope='Fc_1')
                fc2 = slim.fully_connected(net, self.latent_variable_dim,
                                           activation_fn=None, normalizer_fn=None, scope='Fc_2')
    return fc1, fc2
def encoder(self, images, is_training):
    activation_fn = leaky_relu  # tf.nn.relu
    weight_decay = 0.0
    with tf.variable_scope('encoder'):
        with slim.arg_scope([slim.batch_norm], is_training=is_training):
            with slim.arg_scope([slim.conv2d, slim.fully_connected],
                                weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
                                weights_regularizer=slim.l2_regularizer(weight_decay),
                                normalizer_fn=slim.batch_norm,
                                normalizer_params=self.batch_norm_params):
                net = slim.conv2d(images, 32, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_1')
                net = slim.conv2d(net, 64, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_2')
                net = slim.conv2d(net, 128, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_3')
                net = slim.conv2d(net, 256, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_4')
                net = slim.conv2d(net, 512, [4, 4], 2, activation_fn=activation_fn, scope='Conv2d_5')
                net = slim.flatten(net)
                fc1 = slim.fully_connected(net, self.latent_variable_dim,
                                           activation_fn=None, normalizer_fn=None, scope='Fc_1')
                fc2 = slim.fully_connected(net, self.latent_variable_dim,
                                           activation_fn=None, normalizer_fn=None, scope='Fc_2')
    return fc1, fc2
def inference(inputs, keep_prob,
              bottleneck_size=128,
              phase_train=True,
              weight_decay=0.0,
              reuse=None):
    batch_norm_params = {
        'decay': 0.995,
        'epsilon': 0.001,
        'updates_collections': None,
        # 'scale': True,  # [test1]
        'variables_collections': [tf.GraphKeys.TRAINABLE_VARIABLES]}  # [test2: removed from 'trainable_variables']

    with slim.arg_scope(
            [slim.conv2d, slim.fully_connected],
            weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
            weights_regularizer=slim.l2_regularizer(weight_decay),
            biases_regularizer=slim.l2_regularizer(weight_decay),  # [test4: add weight_decay to biases]
            normalizer_fn=slim.batch_norm,
            normalizer_params=batch_norm_params):
        return inception_resnet_v2(
            inputs,
            is_training=phase_train,
            keep_prob=keep_prob,
            bottleneck_size=bottleneck_size,
            reuse=reuse)
def inference(inputs, keep_prob,
              bottleneck_size=128,
              phase_train=True,
              weight_decay=0.0,
              reuse=None):
    batch_norm_params = {
        'decay': 0.995,
        'epsilon': 0.001,
        'updates_collections': None,
        # 'scale': True}  # [test1: add 'gamma']
        'variables_collections': [tf.GraphKeys.TRAINABLE_VARIABLES]}  # [test2: removed from 'trainable_variables']

    with slim.arg_scope(
            [slim.conv2d, slim.fully_connected],
            weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
            weights_regularizer=slim.l2_regularizer(weight_decay),
            biases_regularizer=slim.l2_regularizer(weight_decay),
            normalizer_fn=slim.batch_norm,
            normalizer_params=batch_norm_params):  # [test4: add weight_decay to biases]
        return inception_resnet_v2(
            inputs,
            is_training=phase_train,
            keep_prob=keep_prob,
            bottleneck_size=bottleneck_size,
            reuse=reuse)
def inference(inputs, keep_prob,
              bottleneck_size=128,
              phase_train=True,
              weight_decay=0.0,
              reuse=None):
    batch_norm_params = {
        'decay': 0.995,
        'epsilon': 0.001,
        'updates_collections': None,
        # 'scale': True,  # [test1]
        'variables_collections': [tf.GraphKeys.TRAINABLE_VARIABLES]}  # [test2]

    with slim.arg_scope(
            [slim.conv2d, slim.fully_connected],
            weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
            weights_regularizer=slim.l2_regularizer(weight_decay),
            normalizer_fn=slim.batch_norm,
            normalizer_params=batch_norm_params):
        return inception_resnet_v1(inputs,
                                   is_training=phase_train,
                                   keep_prob=keep_prob,
                                   bottleneck_size=bottleneck_size,
                                   reuse=reuse)
def _extra_conv_arg_scope_with_bn(weight_decay=0.00001,
                                  activation_fn=None,
                                  batch_norm_decay=0.997,
                                  batch_norm_epsilon=1e-5,
                                  batch_norm_scale=True):
    batch_norm_params = {
        'decay': batch_norm_decay,
        'epsilon': batch_norm_epsilon,
        'scale': batch_norm_scale,
        'updates_collections': tf.GraphKeys.UPDATE_OPS_EXTRA,
    }

    with slim.arg_scope(
            [slim.conv2d],
            weights_regularizer=slim.l2_regularizer(weight_decay),
            weights_initializer=slim.variance_scaling_initializer(),
            activation_fn=tf.nn.relu,
            normalizer_fn=slim.batch_norm,
            normalizer_params=batch_norm_params):
        with slim.arg_scope([slim.batch_norm], **batch_norm_params):
            with slim.arg_scope([slim.max_pool2d], padding='SAME') as arg_sc:
                return arg_sc
def _image_to_head(self, is_training, reuse=None):
    # Base bottleneck
    assert (0 <= cfg.MOBILENET.FIXED_LAYERS <= 12)
    net_conv = self._image
    if cfg.MOBILENET.FIXED_LAYERS > 0:
        with slim.arg_scope(mobilenet_v1_arg_scope(is_training=False)):
            net_conv = mobilenet_v1_base(net_conv,
                                         _CONV_DEFS[:cfg.MOBILENET.FIXED_LAYERS],
                                         starting_layer=0,
                                         depth_multiplier=self._depth_multiplier,
                                         reuse=reuse,
                                         scope=self._scope)
    if cfg.MOBILENET.FIXED_LAYERS < 12:
        with slim.arg_scope(mobilenet_v1_arg_scope(is_training=is_training)):
            net_conv = mobilenet_v1_base(net_conv,
                                         _CONV_DEFS[cfg.MOBILENET.FIXED_LAYERS:12],
                                         starting_layer=cfg.MOBILENET.FIXED_LAYERS,
                                         depth_multiplier=self._depth_multiplier,
                                         reuse=reuse,
                                         scope=self._scope)

    self._act_summaries.append(net_conv)
    self._layers['head'] = net_conv

    return net_conv
def __arg_scope(self, weight_decay=0.0005, data_format='NHWC'):
    """Defines the VGG arg scope.

    Args:
      weight_decay: The l2 regularization coefficient.

    Returns:
      An arg_scope.
    """
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        activation_fn=tf.nn.relu,
                        weights_regularizer=slim.l2_regularizer(weight_decay),
                        weights_initializer=tf.contrib.layers.xavier_initializer(),
                        biases_initializer=tf.zeros_initializer()):
        with slim.arg_scope([slim.conv2d, slim.max_pool2d],
                            padding='SAME',
                            data_format=data_format):
            with slim.arg_scope([custom_layers.pad2d,
                                 custom_layers.l2_normalization,
                                 custom_layers.channel_to_last],
                                data_format=data_format) as sc:
                return sc
def build_model(input, image_size=64):
    with slim.arg_scope([slim.conv2d_transpose], kernel_size=[5, 5], stride=2, activation_fn=None):
        net = linear(input, 2 * image_size * image_size, 'generator/linear_1')  # output_size=2^13
        net = tf.reshape(net, [-1, image_size // 16, image_size // 16, 512], name='generator/reshape_2')
        net = BatchNorm(net, name="batch_norm_3")
        net = tf.nn.relu(net)

        net = slim.conv2d_transpose(inputs=net, num_outputs=256, padding="SAME", scope="generator/deconv_4")
        net = BatchNorm(net, name="batch_norm_5")
        net = tf.nn.relu(net)

        net = slim.conv2d_transpose(inputs=net, num_outputs=128, padding="SAME", scope="generator/deconv_6")
        net = BatchNorm(net, name="batch_norm_7")
        net = tf.nn.relu(net)

        net = slim.conv2d_transpose(inputs=net, num_outputs=64, padding="SAME", scope="generator/deconv_8")
        net = BatchNorm(net, name="batch_norm_9")
        net = tf.nn.relu(net)

        net = slim.conv2d_transpose(inputs=net, num_outputs=3, padding="SAME", scope="generator/deconv_10")
        net = tf.nn.tanh(net)
    return net
def encoder(self, x, embedding, reuse=None):
    with tf.variable_scope("encoder", reuse=reuse):
        with slim.arg_scope([slim.conv2d],
                            stride=1,
                            activation_fn=tf.nn.elu,
                            padding="SAME",
                            weights_initializer=tf.contrib.layers.variance_scaling_initializer(),
                            weights_regularizer=slim.l2_regularizer(5e-4),
                            biases_initializer=tf.zeros_initializer()):
            x = slim.conv2d(x, embedding, 3)

            for i in range(self.conv_repeat_num):
                channel_num = embedding * (i + 1)
                x = slim.repeat(x, 2, slim.conv2d, channel_num, 3)
                if i < self.conv_repeat_num - 1:
                    # Is strided convolution a better sub-sampling method than max or average pooling?
                    # x = slim.conv2d(x, channel_num, kernel_size=3, stride=2)  # sub-sampling
                    x = slim.avg_pool2d(x, kernel_size=2, stride=2)
                    # x = slim.max_pool2d(x, 3, 2)

            x = tf.reshape(x, [-1, np.prod([8, 8, channel_num])])
    return x
def decoder(self, z, embedding, reuse=None):
    with tf.variable_scope("decoder", reuse=reuse):
        with slim.arg_scope([slim.conv2d, slim.fully_connected],
                            weights_initializer=tf.contrib.layers.variance_scaling_initializer(),
                            weights_regularizer=slim.l2_regularizer(5e-4),
                            biases_initializer=tf.zeros_initializer()):
            with slim.arg_scope([slim.conv2d], padding="SAME", activation_fn=tf.nn.elu, stride=1):
                x = slim.fully_connected(z, 8 * 8 * embedding, activation_fn=None)
                x = tf.reshape(x, [-1, 8, 8, embedding])

                for i in range(self.conv_repeat_num):
                    x = slim.repeat(x, 2, slim.conv2d, embedding, 3)
                    if i < self.conv_repeat_num - 1:
                        x = resize_nn(x, 2)  # NN up-sampling

                x = slim.conv2d(x, 3, 3, activation_fn=None)
    return x
def discriminator(self, x, name, reuse=None):
    with tf.variable_scope(name, reuse=reuse):
        with slim.arg_scope([slim.conv2d, slim.fully_connected],
                            weights_initializer=tf.contrib.layers.variance_scaling_initializer(),
                            weights_regularizer=slim.l2_regularizer(2e-4)):
            with slim.arg_scope([slim.conv2d], padding="SAME", stride=2, kernel_size=4):
                net = slim.conv2d(x, self.df_dim)
                net = lrelu(net)

                mul = 2
                for bn in self.d_bn:
                    net = slim.conv2d(net, self.df_dim * mul)
                    net = bn(net)
                    net = lrelu(net)
                    mul *= 2

                net = tf.reshape(net, shape=[-1, 2 * 2 * 512])
                net = slim.fully_connected(net, 512, activation_fn=lrelu, normalizer_fn=slim.batch_norm)
                net = slim.fully_connected(net, 1, activation_fn=tf.nn.sigmoid)
    return net  # return prob
def mlp_conv(self, x, kernel_size, stride, num_filters, micro_layer_size, name):
    """
    multi layer perceptron convolution.

    :param num_filters: number of micro_net filter
    :param micro_layer_size: [hidden_layer]
    :return:
    """
    with tf.variable_scope(name, values=[x]):
        # first convolution
        net = slim.conv2d(inputs=x, num_outputs=num_filters,
                          kernel_size=[kernel_size, kernel_size],
                          stride=stride,
                          scope='first_conv', padding='SAME')
        # cccp layer
        with slim.arg_scope([slim.conv2d],
                            kernel_size=[1, 1], stride=1,
                            padding='VALID', activation_fn=tf.nn.relu):
            for hidden_i, hidden_size in enumerate(micro_layer_size):
                net = slim.conv2d(net, hidden_size, scope='hidden_' + str(hidden_i))
    return net
def likelihood(self, z, reuse=False):
    """Build likelihood p(x | z_0)."""
    cfg = self.config
    n_samples = z.get_shape().as_list()[0]
    with util.get_or_create_scope('model', reuse=reuse):
        n_out = int(np.prod(cfg['train_data/shape']))
        net = z
        with slim.arg_scope(
                [slim.fully_connected],
                activation_fn=util.get_activation(cfg['p_net/activation']),
                outputs_collections=[tf.GraphKeys.ACTIVATIONS],
                variables_collections=['model'],
                weights_initializer=layers.variance_scaling_initializer(
                    factor=np.square(cfg['p_net/init_w_stddev']))):
            for i in range(cfg['p_net/n_layers']):
                net = slim.fully_connected(
                    net, cfg['p_net/hidden_size'], scope='fc%d' % i)
            logits = slim.fully_connected(
                net, n_out, activation_fn=None, scope='fc_lik')
        logits = tf.reshape(
            logits, [n_samples, cfg['batch_size']] + cfg['train_data/shape'])
        return dist.Bernoulli(logits=logits, validate_args=False)
def cnn_layers(inputs, scope, end_points_collection, dropout_keep_prob=0.8, is_training=True):
    with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d],
                        outputs_collections=[end_points_collection]):
        with slim.arg_scope([slim.conv2d],
                            normalizer_fn=slim.batch_norm,
                            normalizer_params={'is_training': is_training},
                            activation_fn=leaky_relu):
            net = slim.conv2d(inputs, 32, [3, 3], scope='conv1')
            net = slim.max_pool2d(net, [2, 2], 2, scope='pool1')
            net = slim.conv2d(net, 64, [3, 3], scope='conv2')
            net = slim.max_pool2d(net, [2, 2], 2, scope='pool2')
            net = slim.conv2d(net, 128, [3, 3], scope='conv3')
            net = slim.conv2d(net, 64, [1, 1], scope='conv4')
            box_net = net = slim.conv2d(net, 128, [3, 3], scope='conv5')
            net = slim.max_pool2d(net, [2, 2], 2, scope='pool5')
            net = slim.conv2d(net, 256, [3, 3], scope='conv6')
            net = slim.conv2d(net, 128, [1, 1], scope='conv7')
            net = slim.conv2d(net, 256, [3, 3], scope='conv8')
            box_net = _reorg(box_net, 2)
            net = tf.concat([box_net, net], 3)
            net = slim.conv2d(net, 256, [3, 3], scope='conv9')
            net = slim.conv2d(net, 75, [1, 1], activation_fn=None, scope='conv10')
    return net, end_points_collection
def cnn_layers(inputs, scope, end_points_collection, dropout_keep_prob=0.8, is_training=True):
    with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d],
                        outputs_collections=[end_points_collection]):
        net = slim.conv2d(inputs, 48, [5, 5], scope='conv1')
        net = slim.max_pool2d(net, [2, 2], 2, scope='pool1')
        net = slim.conv2d(net, 64, [5, 5], scope='conv2')
        net = slim.max_pool2d(net, [2, 2], 2, scope='pool2')
        net = slim.conv2d(net, 128, [5, 5], scope='conv3')
        net = slim.max_pool2d(net, [2, 2], 2, scope='pool3')
        net = slim.conv2d(net, 160, [5, 5], scope='conv4')
        net = slim.conv2d(net, 192, [5, 5], scope='conv5')
        net = slim.conv2d(net, 192, [5, 5], scope='conv6')
        net = slim.conv2d(net, 192, [5, 5], scope='conv7')
        net = slim.flatten(net)
        # By removing the fc layer, we get a much smaller model with almost the same performance
        # net = slim.fully_connected(net, 3072, scope='fc8')
    return net, end_points_collection
def fc_layers(net, scope, end_points_collection, num_classes=10, is_training=True,
              dropout_keep_prob=0.5, name_prefix=None):
    def full_scope_name(scope_name):
        return scope_name if name_prefix is None else '%s_%s' % (name_prefix, scope_name)

    with slim.arg_scope([slim.fully_connected, slim.dropout],
                        outputs_collections=[end_points_collection]):
        net = slim.fully_connected(net, num_classes, activation_fn=None,
                                   scope=full_scope_name('fc9'))
    return net, end_points_collection
def cnn_layers(inputs, scope, end_points_collection, dropout_keep_prob=0.8, is_training=True):
    with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d],
                        outputs_collections=[end_points_collection]):
        net = slim.conv2d(inputs, 32, [5, 5], scope='conv1')
        net = slim.max_pool2d(net, [2, 2], 2, scope='pool1')
        net = slim.conv2d(net, 64, [5, 5], scope='conv2')
        net = slim.max_pool2d(net, [2, 2], 2, scope='pool2')
        net = slim.conv2d(net, 64, [5, 5], scope='conv3')
        net = slim.max_pool2d(net, [2, 2], 2, scope='pool3')
        net = slim.conv2d(net, 64, [5, 5], scope='conv4')
        net = slim.conv2d(net, 64, [5, 5], scope='conv5')
        net = slim.conv2d(net, 64, [5, 5], scope='conv6')
        net = slim.conv2d(net, 64, [5, 5], scope='conv7')
        net = slim.flatten(net)
        net = slim.fully_connected(net, 256, scope='fc3')
    return net, end_points_collection
def cnn_layers(inputs, scope, end_points_collection, dropout_keep_prob=0.8, is_training=True):
    with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d],
                        outputs_collections=[end_points_collection]):
        net = slim.conv2d(inputs, 32, [5, 5], scope='conv1')
        net = slim.max_pool2d(net, [2, 2], 2, scope='pool1')
        net = slim.conv2d(net, 64, [5, 5], scope='conv2')
        net = slim.max_pool2d(net, [2, 2], 2, scope='pool2')
        net = slim.conv2d(net, 64, [5, 5], scope='conv3')
        net = slim.max_pool2d(net, [2, 2], 2, scope='pool3')
        net = slim.conv2d(net, 64, [5, 5], scope='conv4')
        net = slim.conv2d(net, 64, [5, 5], scope='conv5')
        net = slim.conv2d(net, 64, [5, 5], scope='conv6')
        net = slim.conv2d(net, 64, [5, 5], scope='conv7')
        net = slim.flatten(net)
        net = slim.fully_connected(net, 128, scope='fc3')
    return net, end_points_collection
def fc_layers(net, scope, end_points_collection, num_classes=10, is_training=True,
              dropout_keep_prob=0.5, name_prefix=None):
    def full_scope_name(scope_name):
        return scope_name if name_prefix is None else '%s_%s' % (name_prefix, scope_name)

    with slim.arg_scope([slim.fully_connected, slim.dropout],
                        outputs_collections=[end_points_collection]):
        '''
        with dropout     accuracy: 0.68, data: 4.2M
        without dropout  accuracy: 0.71, data: 4.2M
        '''
        # net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
        #                    scope=full_scope_name('dropout3'))
        net = slim.fully_connected(net, num_classes, activation_fn=None,
                                   scope=full_scope_name('fc4'))
    return net, end_points_collection
def fc_layers(net, scope, end_points_collection, num_classes=10, is_training=True,
              dropout_keep_prob=0.5, name_prefix=None):
    def full_scope_name(scope_name):
        return scope_name if name_prefix is None else '%s_%s' % (name_prefix, scope_name)

    with slim.arg_scope([slim.fully_connected, slim.dropout],
                        outputs_collections=[end_points_collection]):
        net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
                           scope=full_scope_name('dropout3'))
        net = slim.fully_connected(net, num_classes, activation_fn=None,
                                   scope=full_scope_name('fc4'))
    return net, end_points_collection
def fc_layers(net,
              scope,
              end_points_collection,
              num_classes=1000,
              is_training=True,
              dropout_keep_prob=0.5,
              spatial_squeeze=True,
              name_prefix=None):
    full_scope_name = lambda scope_name: scope_name if name_prefix is None else '%s_%s' % (name_prefix, scope_name)

    # Use conv2d instead of fully_connected layers.
    with slim.arg_scope([slim.conv2d],
                        weights_initializer=trunc_normal(0.005),
                        biases_initializer=tf.constant_initializer(0.1),
                        outputs_collections=[end_points_collection]):
        net = slim.conv2d(net, num_classes, [1, 1],
                          activation_fn=None,
                          normalizer_fn=None,
                          biases_initializer=tf.zeros_initializer(),
                          scope=full_scope_name('fc8'))
        if spatial_squeeze:
            net = tf.squeeze(net, [1, 2], name=full_scope_name('fc8/squeezed'))
        ops.add_to_collection(end_points_collection, net)
    return net, end_points_collection