We extracted the following 45 code examples from open-source Python projects to illustrate how to use tensorflow.variable_op_scope().
def batch_norm(input, is_train, scope=None, reuse=None, decay=0.9):
    shape = input.get_shape()
    num_out = shape[-1]
    with tf.variable_op_scope([input], scope, 'BN', reuse=reuse):
        beta = tf.get_variable('beta', [num_out],
                               initializer=tf.constant_initializer(0.0), trainable=True)
        gamma = tf.get_variable('gamma', [num_out],
                                initializer=tf.constant_initializer(1.0), trainable=True)
        batch_mean, batch_var = tf.nn.moments(input, [0, 1, 2], name='moments') \
            if len(shape) == 4 else tf.nn.moments(input, [0], name='moments')
        ema = tf.train.ExponentialMovingAverage(decay=decay)

        def mean_var_with_update():
            ema_apply_op = ema.apply([batch_mean, batch_var])
            with tf.control_dependencies([ema_apply_op]):
                return tf.identity(batch_mean), tf.identity(batch_var)

        mean, var = tf.cond(is_train, mean_var_with_update,
                            lambda: (ema.average(batch_mean), ema.average(batch_var)))
        return tf.nn.batch_normalization(input, mean, var, beta, gamma, 1e-3)
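A minimal usage sketch for the layer above, assuming a TensorFlow 0.x build that still ships tf.variable_op_scope; the placeholder names and shapes are illustrative only:

import tensorflow as tf

# Hypothetical 4-D feature map and a boolean training flag fed at run time.
images = tf.placeholder(tf.float32, [32, 28, 28, 16], name='images')
is_train = tf.placeholder(tf.bool, [], name='is_train')

# During training the batch statistics are used (and the moving averages
# updated); at test time the EMA values are read back instead.
normalized = batch_norm(images, is_train, scope='bn1', decay=0.9)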
def join(columns, coin):
    """Takes the mean of the columns, applying drop path when
    `tflearn.get_training_mode()` is True.

    Args:
        columns: columns of the fractal block.
        coin: boolean in tensor form. Determines whether drop path is
            local or global.
    """
    if len(columns) == 1:
        return columns[0]
    with tf.variable_op_scope(columns, None, "Join"):
        columns = tf.convert_to_tensor(columns)
        columns = tf.cond(tflearn.get_training_mode(),
                          lambda: drop_path(columns, coin),
                          lambda: columns)
        out = tf.reduce_mean(columns, 0)
    return out
def BinarizedSpatialConvolution(nOutputPlane, kW, kH, dW=1, dH=1,
                                padding='VALID', bias=True, reuse=None,
                                name='BinarizedSpatialConvolution'):
    def b_conv2d(x, is_training=True):
        nInputPlane = x.get_shape().as_list()[3]
        with tf.variable_op_scope([x], None, name, reuse=reuse):
            w = tf.get_variable('weight', [kH, kW, nInputPlane, nOutputPlane],
                                initializer=tf.contrib.layers.xavier_initializer_conv2d())
            bin_w = binarize(w)
            bin_x = binarize(x)
            '''
            Note that we use binarized versions of the input and the weights.
            Since the binarize function uses the STE, we calculate the gradients
            with bin_x and bin_w but update w (the full-precision version).
            '''
            out = tf.nn.conv2d(bin_x, bin_w, strides=[1, dH, dW, 1], padding=padding)
            if bias:
                b = tf.get_variable('bias', [nOutputPlane], initializer=tf.zeros_initializer)
                out = tf.nn.bias_add(out, b)
            return out
    return b_conv2d
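`binarize` is defined elsewhere in the project; the comment above only says it relies on the straight-through estimator (STE). A common way such a helper is written is shown below, purely as an illustrative sketch rather than the project's actual implementation:

import tensorflow as tf

def binarize(x):
    # Forward pass: sign(x). Backward pass: the gradient of Sign is overridden
    # with Identity, so gradients flow "straight through" the binarization.
    g = tf.get_default_graph()
    with g.gradient_override_map({"Sign": "Identity"}):
        return tf.sign(x)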
def BinarizedWeightOnlySpatialConvolution(nOutputPlane, kW, kH, dW=1, dH=1,
                                          padding='VALID', bias=True, reuse=None,
                                          name='BinarizedWeightOnlySpatialConvolution'):
    '''
    This function is used only at the first layer of the model, since we don't
    want to binarize the RGB images.
    '''
    def bc_conv2d(x, is_training=True):
        nInputPlane = x.get_shape().as_list()[3]
        with tf.variable_op_scope([x], None, name, reuse=reuse):
            w = tf.get_variable('weight', [kH, kW, nInputPlane, nOutputPlane],
                                initializer=tf.contrib.layers.xavier_initializer_conv2d())
            bin_w = binarize(w)
            out = tf.nn.conv2d(x, bin_w, strides=[1, dH, dW, 1], padding=padding)
            if bias:
                b = tf.get_variable('bias', [nOutputPlane], initializer=tf.zeros_initializer)
                out = tf.nn.bias_add(out, b)
            return out
    return bc_conv2d
def BinarizedAffine(nOutputPlane, bias=True, name=None, reuse=None):
    def b_affineLayer(x, is_training=True):
        with tf.variable_op_scope([x], name, 'Affine', reuse=reuse):
            '''
            Note that we use binarized versions of the input (bin_x) and the
            weights (bin_w). Since the binarize function uses the STE, we
            calculate the gradients with bin_x and bin_w but update w (the
            full-precision version).
            '''
            bin_x = binarize(x)
            reshaped = tf.reshape(bin_x, [x.get_shape().as_list()[0], -1])
            nInputPlane = reshaped.get_shape().as_list()[1]
            w = tf.get_variable('weight', [nInputPlane, nOutputPlane],
                                initializer=tf.contrib.layers.xavier_initializer())
            bin_w = binarize(w)
            output = tf.matmul(reshaped, bin_w)
            if bias:
                b = tf.get_variable('bias', [nOutputPlane], initializer=tf.zeros_initializer)
                output = tf.nn.bias_add(output, b)
        return output
    return b_affineLayer
def repeat_op(repetitions, inputs, op, *args, **kwargs):
    """Builds a sequential tower starting from inputs by applying an op repeatedly.

    It creates new scopes for each operation by incrementing a counter.
    Example: given repeat_op(3, _, ops.conv2d, 64, [3, 3], scope='conv1')
    it will repeat the given op under the following variable_scopes:
        conv1/Conv
        conv1/Conv_1
        conv1/Conv_2

    Args:
        repetitions: number of repetitions.
        inputs: a tensor of size [batch_size, height, width, channels].
        op: an operation.
        *args: args for the op.
        **kwargs: kwargs for the op.

    Returns:
        a tensor resulting from applying the operation op `repetitions` times.

    Raises:
        ValueError: if the op is unknown or wrong.
    """
    scope = kwargs.pop('scope', None)
    with tf.variable_op_scope([inputs], scope, 'RepeatOp'):
        tower = inputs
        for _ in range(repetitions):
            tower = op(tower, *args, **kwargs)
        return tower
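A usage sketch matching the docstring example; `ops.conv2d` is assumed to be the slim-style convolution wrapper this helper is normally paired with, and the input placeholder is illustrative:

inputs = tf.placeholder(tf.float32, [16, 224, 224, 3])

# Three stacked 3x3 convolutions with 64 filters each, created under the
# scopes conv1/Conv, conv1/Conv_1 and conv1/Conv_2.
net = repeat_op(3, inputs, ops.conv2d, 64, [3, 3], scope='conv1')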
def coin_flip(prob=.5):
    """Random boolean variable, with `prob` chance of being True.

    Used to choose between local and global drop path.

    Args:
        prob: float, probability of being True.
    """
    with tf.variable_op_scope([], None, "CoinFlip"):
        coin = tf.random_uniform([1])[0] > prob
    return coin
def drop_path(columns, coin):
    with tf.variable_op_scope([columns], None, "DropPath"):
        out = tf.cond(coin,
                      lambda: drop_some(columns),
                      lambda: random_column(columns))
    return out
def SpatialConvolution(nOutputPlane, kW, kH, dW=1, dH=1,
                       padding='VALID', bias=True, reuse=None,
                       name='SpatialConvolution'):
    def conv2d(x, is_training=True):
        nInputPlane = x.get_shape().as_list()[3]
        with tf.variable_op_scope([x], None, name, reuse=reuse):
            w = tf.get_variable('weight', [kH, kW, nInputPlane, nOutputPlane],
                                initializer=tf.contrib.layers.xavier_initializer_conv2d())
            out = tf.nn.conv2d(x, w, strides=[1, dH, dW, 1], padding=padding)
            if bias:
                b = tf.get_variable('bias', [nOutputPlane], initializer=tf.zeros_initializer)
                out = tf.nn.bias_add(out, b)
            return out
    return conv2d
def Affine(nOutputPlane, bias=True, name=None, reuse=None):
    def affineLayer(x, is_training=True):
        with tf.variable_op_scope([x], name, 'Affine', reuse=reuse):
            reshaped = tf.reshape(x, [x.get_shape().as_list()[0], -1])
            nInputPlane = reshaped.get_shape().as_list()[1]
            w = tf.get_variable('weight', [nInputPlane, nOutputPlane],
                                initializer=tf.contrib.layers.xavier_initializer())
            output = tf.matmul(reshaped, w)
            if bias:
                b = tf.get_variable('bias', [nOutputPlane], initializer=tf.zeros_initializer)
                output = tf.nn.bias_add(output, b)
        return output
    return affineLayer
def Dropout(p, name='Dropout'):
    def dropout_layer(x, is_training=True):
        with tf.variable_op_scope([x], None, name):
            # def drop(): return tf.nn.dropout(x, p)
            # def no_drop(): return x
            # return tf.cond(is_training, drop, no_drop)
            if is_training:
                return tf.nn.dropout(x, p)
            else:
                return x
    return dropout_layer
def ReLU(name='ReLU'):
    def layer(x, is_training=True):
        with tf.variable_op_scope([x], None, name):
            return tf.nn.relu(x)
    return layer
def HardTanh(name='HardTanh'):
    def layer(x, is_training=True):
        with tf.variable_op_scope([x], None, name):
            return tf.clip_by_value(x, -1, 1)
    return layer
def View(shape, name='View'):
    def view_layer(x, is_training=True):
        with tf.variable_op_scope([x], None, name):
            return tf.reshape(x, shape)
    return view_layer
def SpatialMaxPooling(kW, kH=None, dW=None, dH=None, padding='VALID',
                      name='SpatialMaxPooling'):
    kH = kH or kW
    dW = dW or kW
    dH = dH or kH
    def max_pool(x, is_training=True):
        with tf.variable_op_scope([x], None, name):
            return tf.nn.max_pool(x, ksize=[1, kW, kH, 1],
                                  strides=[1, dW, dH, 1], padding=padding)
    return max_pool
def Sequential(moduleList):
    def model(x, is_training=True):
        # Create model
        output = x
        # with tf.variable_op_scope([x], None, name):
        for i, m in enumerate(moduleList):
            output = m(output, is_training=is_training)
            tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, output)
        return output
    return model
def Concat(moduleList, dim=3):
    def model(x, is_training=True):
        # Create model
        outputs = []
        for i, m in enumerate(moduleList):
            name = 'layer_' + str(i)
            with tf.variable_op_scope([x], name, 'Layer'):
                outputs.append(m(x, is_training=is_training))
        output = tf.concat(dim, outputs)
        return output
    return model
def Residual(moduleList, name='Residual'):
    m = Sequential(moduleList)
    def model(x, is_training=True):
        # Create model
        with tf.variable_op_scope([x], None, name):
            output = tf.add(m(x, is_training=is_training), x)
            return output
    return model
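Since the wrappers above all return closures taking `(x, is_training)`, a model is just a composition of them. A small illustrative stack (the layer sizes and hyper-parameters are made up):

model = Sequential([
    SpatialConvolution(64, 3, 3, padding='SAME'),
    ReLU(),
    Residual([SpatialConvolution(64, 3, 3, padding='SAME'), ReLU()]),
    SpatialMaxPooling(2, 2, 2, 2),
    Dropout(0.5),
    Affine(10),
])

x = tf.placeholder(tf.float32, [128, 32, 32, 3])
logits = model(x, is_training=True)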
def policy(obs, theta, name='policy'):
    with tf.variable_op_scope([obs], name, name):
        h0 = tf.identity(obs, name='h0-obs')
        h1 = tf.nn.relu(tf.matmul(h0, theta[0]) + theta[1], name='h1')
        h2 = tf.nn.relu(tf.matmul(h1, theta[2]) + theta[3], name='h2')
        h3 = tf.identity(tf.matmul(h2, theta[4]) + theta[5], name='h3')
        action = tf.nn.tanh(h3, name='h4-action')
        return action
def qfunction(obs, act, theta, name="qfunction"):
    with tf.variable_op_scope([obs, act], name, name):
        h0 = tf.identity(obs, name='h0-obs')
        h0a = tf.identity(act, name='h0-act')
        h1 = tf.nn.relu(tf.matmul(h0, theta[0]) + theta[1], name='h1')
        h1a = tf.concat(1, [h1, act])
        h2 = tf.nn.relu(tf.matmul(h1a, theta[2]) + theta[3], name='h2')
        qs = tf.matmul(h2, theta[4]) + theta[5]
        q = tf.squeeze(qs, [1], name='h3-q')
        return q
def policy_network(state, theta, name='policy'):
    with tf.variable_op_scope([state], name, name):
        h0 = tf.identity(state, name='h0-state')
        h1 = tf.nn.relu(tf.matmul(h0, theta[0]) + theta[1], name='h1')
        h2 = tf.nn.relu(tf.matmul(h1, theta[2]) + theta[3], name='h2')
        h3 = tf.identity(tf.matmul(h2, theta[4]) + theta[5], name='h3')
        action = tf.nn.tanh(h3, name='h4-action')
        return action
def q_network(state, action, theta, name="q_network"):
    with tf.variable_op_scope([state, action], name, name):
        h0 = tf.identity(state, name='h0-state')
        h0a = tf.identity(action, name='h0-act')
        h1 = tf.nn.relu(tf.matmul(h0, theta[0]) + theta[1], name='h1')
        h1a = tf.concat(1, [h1, action])
        h2 = tf.nn.relu(tf.matmul(h1a, theta[2]) + theta[3], name='h2')
        qs = tf.matmul(h2, theta[4]) + theta[5]
        q = tf.squeeze(qs, [1], name='h3-q')
        return q
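Both network builders expect their parameters as an explicit list `theta` of six tensors ([W1, b1, W2, b2, W3, b3]). A hedged sketch of how they might be wired together, with made-up dimensions and a hypothetical `linear_params` helper:

import tensorflow as tf

# Illustrative sizes; the real values depend on the environment.
dimo, dimu, l1, l2 = 3, 1, 400, 300

def linear_params(n_in, n_out, name):
    # Hypothetical helper: one weight matrix and one bias vector.
    W = tf.get_variable(name + '_W', [n_in, n_out],
                        initializer=tf.random_uniform_initializer(-0.05, 0.05))
    b = tf.get_variable(name + '_b', [n_out],
                        initializer=tf.constant_initializer(0.0))
    return [W, b]

with tf.variable_scope('params'):
    theta_p = (linear_params(dimo, l1, 'p1') +
               linear_params(l1, l2, 'p2') +
               linear_params(l2, dimu, 'p3'))
    # The Q-network concatenates the action after its first layer, so the
    # second weight matrix takes l1 + dimu inputs and the last layer is scalar.
    theta_q = (linear_params(dimo, l1, 'q1') +
               linear_params(l1 + dimu, l2, 'q2') +
               linear_params(l2, 1, 'q3'))

state = tf.placeholder(tf.float32, [None, dimo])
action = policy_network(state, theta_p)   # deterministic action in [-1, 1]
q = q_network(state, action, theta_q)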
def fractal_template(inputs, num_columns, block_fn, block_asc,
                     joined=True, is_training=True, reuse=False, scope=None):
    """Template for making fractal blocks.

    Given a function and a corresponding arg_scope, `fractal_template` will
    build a truncated fractal with `num_columns` columns.

    Args:
        inputs: a 4-D tensor `[batch_size, height, width, channels]`.
        num_columns: integer, the number of columns in the fractal.
        block_fn: function to be called within each fractal.
        block_asc: a function that returns the arg_scope for `block_fn`.
        joined: boolean, whether the output columns should be joined.
        reuse: whether or not the layer and its variables should be reused.
            To be able to reuse the layer, scope must be given.
        scope: Optional scope for `variable_scope`.
    """
    def fractal_expand(inputs, num_columns, joined):
        '''Recursive helper function for making the fractal.'''
        with block_asc():
            output = lambda cols: join(cols, coin) if joined else cols
            if num_columns == 1:
                return output([block_fn(inputs)])
            left = block_fn(inputs)
            right = fractal_expand(inputs, num_columns - 1, joined=True)
            right = fractal_expand(right, num_columns - 1, joined=False)
            cols = [left] + right
            return output(cols)

    with tf.variable_op_scope([inputs], scope, 'Fractal', reuse=reuse) as scope:
        coin = coin_flip()
        net = fractal_expand(inputs, num_columns, joined)
    return net
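A sketch of how the template might be instantiated; the block function and the no-op arg-scope below are illustrative stand-ins, not the project's own definitions:

import contextlib
import tensorflow as tf
import tflearn

def conv_block(incoming):
    # Illustrative block: one 3x3 convolution with 64 filters.
    return tflearn.conv_2d(incoming, 64, 3, activation='relu')

@contextlib.contextmanager
def no_op_arg_scope():
    # Stand-in for `block_asc`; a real caller would yield an arg_scope that
    # configures `block_fn`.
    yield

images = tf.placeholder(tf.float32, [None, 32, 32, 3])
net = fractal_template(images, num_columns=3, block_fn=conv_block,
                       block_asc=no_op_arg_scope, scope='fractal1')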