The following are 25 code examples, extracted from open source Python projects, that illustrate how to use tensorflow.concat_v2().
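Before the project examples, here is a minimal sketch of the call itself. It assumes an older TensorFlow build that still exposes tf.concat_v2 (the transitional API introduced before 1.0); on TF 1.0+ the same call is spelled tf.concat(values, axis), which is why several examples below fall back between the two.

import tensorflow as tf

a = tf.constant([[1.0, 2.0], [3.0, 4.0]])
b = tf.constant([[5.0, 6.0], [7.0, 8.0]])

# tf.concat_v2 takes the list of values first and the axis second,
# matching the argument order later adopted by tf.concat in TF 1.0.
try:
    c = tf.concat_v2([a, b], 1)   # pre-1.0 builds
except AttributeError:
    c = tf.concat([a, b], 1)      # TF 1.0+ spelling

with tf.Session() as sess:
    print(sess.run(c))            # [[1. 2. 5. 6.], [3. 4. 7. 8.]]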
def omniglot():

    sess = tf.InteractiveSession()

    """    def wrapper(v):
        return tf.Print(v, [v], message="Printing v")

    v = tf.Variable(initial_value=np.arange(0, 36).reshape((6, 6)), dtype=tf.float32, name='Matrix')

    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())

    temp = tf.Variable(initial_value=np.arange(0, 36).reshape((6, 6)), dtype=tf.float32, name='temp')
    temp = wrapper(v)
    #with tf.control_dependencies([temp]):
    temp.eval()
    print 'Hello'"""

def update_tensor(V, dim2, val):  # Update tensor V, with index(:,dim2[:]) by val[:]
    val = tf.cast(val, V.dtype)

    def body(_, elems):
        # Unpack here; tuple parameters in a def are Python-2-only syntax.
        v, d2, chg = elems
        d2_int = tf.cast(d2, tf.int32)
        return tf.slice(tf.concat_v2([v[:d2_int], [chg], v[d2_int + 1:]], axis=0),
                        [0], [v.get_shape().as_list()[0]])

    Z = tf.scan(body, elems=(V, dim2, val),
                initializer=tf.constant(1, shape=V.get_shape().as_list()[1:], dtype=tf.float32),
                name="Scan_Update")
    return Z
def concatenate(tensors, axis=-1):
    """Concatenates a list of tensors alongside the specified axis.

    # Returns
        A tensor.
    """
    if axis < 0:
        dims = ndim(tensors[0])
        if dims:
            axis = axis % dims
        else:
            axis = 0

    if py_all([is_sparse(x) for x in tensors]):
        return tf.sparse_concat(axis, tensors)
    else:
        if tf_major_version >= 1:
            return tf.concat([to_dense(x) for x in tensors], axis)
        else:
            try:
                return tf.concat_v2([to_dense(x) for x in tensors], axis)
            except AttributeError:
                return tf.concat(axis, [to_dense(x) for x in tensors])
def block35(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
    """Builds the 35x35 resnet block."""
    with tf.variable_scope(scope, 'Block35', [net], reuse=reuse):
        with tf.variable_scope('Branch_0'):
            tower_conv = slim.conv2d(net, 32, 1, scope='Conv2d_1x1')
        with tf.variable_scope('Branch_1'):
            tower_conv1_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
            tower_conv1_1 = slim.conv2d(tower_conv1_0, 32, 3, scope='Conv2d_0b_3x3')
        with tf.variable_scope('Branch_2'):
            tower_conv2_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
            tower_conv2_1 = slim.conv2d(tower_conv2_0, 48, 3, scope='Conv2d_0b_3x3')
            tower_conv2_2 = slim.conv2d(tower_conv2_1, 64, 3, scope='Conv2d_0c_3x3')
        mixed = tf.concat_v2([tower_conv, tower_conv1_1, tower_conv2_2], 3)
        up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
                         activation_fn=None, scope='Conv2d_1x1')
        net += scale * up
        if activation_fn:
            net = activation_fn(net)
    return net
def block17(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
    """Builds the 17x17 resnet block."""
    with tf.variable_scope(scope, 'Block17', [net], reuse=reuse):
        with tf.variable_scope('Branch_0'):
            tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1')
        with tf.variable_scope('Branch_1'):
            tower_conv1_0 = slim.conv2d(net, 128, 1, scope='Conv2d_0a_1x1')
            tower_conv1_1 = slim.conv2d(tower_conv1_0, 160, [1, 7],
                                        scope='Conv2d_0b_1x7')
            tower_conv1_2 = slim.conv2d(tower_conv1_1, 192, [7, 1],
                                        scope='Conv2d_0c_7x1')
        mixed = tf.concat_v2([tower_conv, tower_conv1_2], 3)
        up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
                         activation_fn=None, scope='Conv2d_1x1')
        net += scale * up
        if activation_fn:
            net = activation_fn(net)
    return net
def block8(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
    """Builds the 8x8 resnet block."""
    with tf.variable_scope(scope, 'Block8', [net], reuse=reuse):
        with tf.variable_scope('Branch_0'):
            tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1')
        with tf.variable_scope('Branch_1'):
            tower_conv1_0 = slim.conv2d(net, 192, 1, scope='Conv2d_0a_1x1')
            tower_conv1_1 = slim.conv2d(tower_conv1_0, 224, [1, 3],
                                        scope='Conv2d_0b_1x3')
            tower_conv1_2 = slim.conv2d(tower_conv1_1, 256, [3, 1],
                                        scope='Conv2d_0c_3x1')
        mixed = tf.concat_v2([tower_conv, tower_conv1_2], 3)
        up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
                         activation_fn=None, scope='Conv2d_1x1')
        net += scale * up
        if activation_fn:
            net = activation_fn(net)
    return net
def block_inception_a(inputs, scope=None, reuse=None):
    """Builds Inception-A block for Inception v4 network."""
    # By default use stride=1 and SAME padding
    with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
                        stride=1, padding='SAME'):
        with tf.variable_scope(scope, 'BlockInceptionA', [inputs], reuse=reuse):
            with tf.variable_scope('Branch_0'):
                branch_0 = slim.conv2d(inputs, 96, [1, 1], scope='Conv2d_0a_1x1')
            with tf.variable_scope('Branch_1'):
                branch_1 = slim.conv2d(inputs, 64, [1, 1], scope='Conv2d_0a_1x1')
                branch_1 = slim.conv2d(branch_1, 96, [3, 3], scope='Conv2d_0b_3x3')
            with tf.variable_scope('Branch_2'):
                branch_2 = slim.conv2d(inputs, 64, [1, 1], scope='Conv2d_0a_1x1')
                branch_2 = slim.conv2d(branch_2, 96, [3, 3], scope='Conv2d_0b_3x3')
                branch_2 = slim.conv2d(branch_2, 96, [3, 3], scope='Conv2d_0c_3x3')
            with tf.variable_scope('Branch_3'):
                branch_3 = slim.avg_pool2d(inputs, [3, 3], scope='AvgPool_0a_3x3')
                branch_3 = slim.conv2d(branch_3, 96, [1, 1], scope='Conv2d_0b_1x1')
            return tf.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
def block_reduction_a(inputs, scope=None, reuse=None):
    """Builds Reduction-A block for Inception v4 network."""
    # By default use stride=1 and SAME padding
    with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
                        stride=1, padding='SAME'):
        with tf.variable_scope(scope, 'BlockReductionA', [inputs], reuse=reuse):
            with tf.variable_scope('Branch_0'):
                branch_0 = slim.conv2d(inputs, 384, [3, 3], stride=2, padding='VALID',
                                       scope='Conv2d_1a_3x3')
            with tf.variable_scope('Branch_1'):
                branch_1 = slim.conv2d(inputs, 192, [1, 1], scope='Conv2d_0a_1x1')
                branch_1 = slim.conv2d(branch_1, 224, [3, 3], scope='Conv2d_0b_3x3')
                branch_1 = slim.conv2d(branch_1, 256, [3, 3], stride=2,
                                       padding='VALID', scope='Conv2d_1a_3x3')
            with tf.variable_scope('Branch_2'):
                branch_2 = slim.max_pool2d(inputs, [3, 3], stride=2, padding='VALID',
                                           scope='MaxPool_1a_3x3')
            return tf.concat_v2([branch_0, branch_1, branch_2], 3)
def block_reduction_b(inputs, scope=None, reuse=None):
    """Builds Reduction-B block for Inception v4 network."""
    # By default use stride=1 and SAME padding
    with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
                        stride=1, padding='SAME'):
        with tf.variable_scope(scope, 'BlockReductionB', [inputs], reuse=reuse):
            with tf.variable_scope('Branch_0'):
                branch_0 = slim.conv2d(inputs, 192, [1, 1], scope='Conv2d_0a_1x1')
                branch_0 = slim.conv2d(branch_0, 192, [3, 3], stride=2,
                                       padding='VALID', scope='Conv2d_1a_3x3')
            with tf.variable_scope('Branch_1'):
                branch_1 = slim.conv2d(inputs, 256, [1, 1], scope='Conv2d_0a_1x1')
                branch_1 = slim.conv2d(branch_1, 256, [1, 7], scope='Conv2d_0b_1x7')
                branch_1 = slim.conv2d(branch_1, 320, [7, 1], scope='Conv2d_0c_7x1')
                branch_1 = slim.conv2d(branch_1, 320, [3, 3], stride=2,
                                       padding='VALID', scope='Conv2d_1a_3x3')
            with tf.variable_scope('Branch_2'):
                branch_2 = slim.max_pool2d(inputs, [3, 3], stride=2, padding='VALID',
                                           scope='MaxPool_1a_3x3')
            return tf.concat_v2([branch_0, branch_1, branch_2], 3)
def block_inception_c(inputs, scope=None, reuse=None):
    """Builds Inception-C block for Inception v4 network."""
    # By default use stride=1 and SAME padding
    with slim.arg_scope([slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
                        stride=1, padding='SAME'):
        with tf.variable_scope(scope, 'BlockInceptionC', [inputs], reuse=reuse):
            with tf.variable_scope('Branch_0'):
                branch_0 = slim.conv2d(inputs, 256, [1, 1], scope='Conv2d_0a_1x1')
            with tf.variable_scope('Branch_1'):
                branch_1 = slim.conv2d(inputs, 384, [1, 1], scope='Conv2d_0a_1x1')
                branch_1 = tf.concat_v2([
                    slim.conv2d(branch_1, 256, [1, 3], scope='Conv2d_0b_1x3'),
                    slim.conv2d(branch_1, 256, [3, 1], scope='Conv2d_0c_3x1')], 3)
            with tf.variable_scope('Branch_2'):
                branch_2 = slim.conv2d(inputs, 384, [1, 1], scope='Conv2d_0a_1x1')
                branch_2 = slim.conv2d(branch_2, 448, [3, 1], scope='Conv2d_0b_3x1')
                branch_2 = slim.conv2d(branch_2, 512, [1, 3], scope='Conv2d_0c_1x3')
                branch_2 = tf.concat_v2([
                    slim.conv2d(branch_2, 256, [1, 3], scope='Conv2d_0d_1x3'),
                    slim.conv2d(branch_2, 256, [3, 1], scope='Conv2d_0e_3x1')], 3)
            with tf.variable_scope('Branch_3'):
                branch_3 = slim.avg_pool2d(inputs, [3, 3], scope='AvgPool_0a_3x3')
                branch_3 = slim.conv2d(branch_3, 256, [1, 1], scope='Conv2d_0b_1x1')
            return tf.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
def concatenate(tensors, axis=-1):
    '''Concatenates a list of tensors alongside the specified axis.
    '''
    if axis < 0:
        dims = ndim(tensors[0])
        if dims:
            axis = axis % dims
        else:
            axis = 0

    if py_all([is_sparse(x) for x in tensors]):
        return tf.sparse_concat(axis, tensors)
    else:
        try:
            return tf.concat_v2([to_dense(x) for x in tensors], axis)
        except AttributeError:
            return tf.concat(axis, [to_dense(x) for x in tensors])
def concat(tensors, axis, *args, **kwargs):
    return tf.concat_v2(tensors, axis, *args, **kwargs)
def conv_cond_concat(x, y):
    """Concatenate conditioning vector on feature map axis."""
    x_shapes = x.get_shape()
    y_shapes = y.get_shape()
    return tf.concat_v2([
        x, y * tf.ones([x_shapes[0], x_shapes[1], x_shapes[2], y_shapes[3]])], 3)
def average_gradients(tower_grads):
    """Calculate the average gradient for each shared variable across all towers.

    Note that this function provides a synchronization point across all towers.

    Args:
        tower_grads: List of lists of (gradient, variable) tuples. The outer list
            is over individual gradients. The inner list is over the gradient
            calculation for each tower.
    Returns:
        List of pairs of (gradient, variable) where the gradient has been averaged
        across all towers.
    """
    average_grads = []
    for grad_and_vars in zip(*tower_grads):
        # Note that each grad_and_vars looks like the following:
        #   ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
        grads = []
        for g, _ in grad_and_vars:
            # Add 0 dimension to the gradients to represent the tower.
            expanded_g = tf.expand_dims(g, 0)

            # Append on a 'tower' dimension which we will average over below.
            grads.append(expanded_g)

        # Average over the 'tower' dimension.
        grad = tf.concat_v2(grads, 0)
        grad = tf.reduce_mean(grad, 0)

        # Keep in mind that the Variables are redundant because they are shared
        # across towers. So .. we will just return the first tower's pointer to
        # the Variable.
        v = grad_and_vars[0][1]
        grad_and_var = (grad, v)
        average_grads.append(grad_and_var)
    return average_grads
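For context, a minimal sketch of how tower_grads is typically assembled before being handed to average_gradients. The optimizer choice, num_gpus, and build_tower_loss are placeholders for illustration, not part of the original project:

optimizer = tf.train.GradientDescentOptimizer(0.1)
tower_grads = []
for i in range(num_gpus):                      # num_gpus assumed defined elsewhere
    with tf.device('/gpu:%d' % i):
        loss = build_tower_loss(i)             # hypothetical per-tower loss builder
        # Each entry is one tower's list of (gradient, variable) tuples.
        tower_grads.append(optimizer.compute_gradients(loss))

# Average the per-tower gradients and apply them once to the shared variables.
grads = average_gradients(tower_grads)
train_op = optimizer.apply_gradients(grads)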
def forward_tensor(self, x):
    N, D = tf.shape(x)[0], tf.shape(x)[2]
    xm = tf.slice(x, [0, 0, 0], tf.stack([N - 1, -1, -1]))
    xp = x[1:, :, :]
    diagblocks = tf.matmul(x, x, transpose_a=True)
    # Pad the off-diagonal blocks with a zero block so both stacks have N entries.
    offblocks = tf.concat_v2([tf.matmul(xm, xp, transpose_a=True),
                              tf.zeros((1, D, D), dtype=tf.float64)], 0)
    return tf.stack([diagblocks, offblocks])
def l2_norm(weights):
    weights_flat = []
    for weight in weights:
        weights_flat = tf.concat_v2([weights_flat, tf.reshape(weight, [-1])], axis=0)
    return tf.reduce_mean(tf.square(weights_flat))
def build_model(self):
    sc = predictron_arg_scope()

    with tf.variable_scope('state'):
        with slim.arg_scope(sc):
            state = slim.conv2d(self.inputs, 32, [3, 3], scope='conv1')
            state = layers.batch_norm(state, activation_fn=tf.nn.relu, scope='conv1/preact')
            state = slim.conv2d(state, 32, [3, 3], scope='conv2')
            state = layers.batch_norm(state, activation_fn=tf.nn.relu, scope='conv2/preact')

    iter_template = tf.make_template('iter', self.iter_func, unique_name_='iter')

    rewards_arr = []
    gammas_arr = []
    lambdas_arr = []
    values_arr = []

    for k in range(self.max_depth):
        state, reward, gamma, lambda_, value = iter_template(state)
        rewards_arr.append(reward)
        gammas_arr.append(gamma)
        lambdas_arr.append(lambda_)
        values_arr.append(value)

    _, _, _, _, value = iter_template(state)
    # K + 1 elements
    values_arr.append(value)

    bs = tf.shape(self.inputs)[0]
    # [batch_size, K * maze_size]
    self.rewards = tf.pack(rewards_arr, axis=1)
    # [batch_size, K, maze_size]
    self.rewards = tf.reshape(self.rewards, [bs, self.max_depth, self.maze_size])
    # [batch_size, K + 1, maze_size]
    self.rewards = tf.concat_v2(
        values=[tf.zeros(shape=[bs, 1, self.maze_size], dtype=tf.float32), self.rewards],
        axis=1, name='rewards')

    # [batch_size, K * maze_size]
    self.gammas = tf.pack(gammas_arr, axis=1)
    # [batch_size, K, maze_size]
    self.gammas = tf.reshape(self.gammas, [bs, self.max_depth, self.maze_size])
    # [batch_size, K + 1, maze_size]
    self.gammas = tf.concat_v2(
        values=[tf.ones(shape=[bs, 1, self.maze_size], dtype=tf.float32), self.gammas],
        axis=1, name='gammas')

    # [batch_size, K * maze_size]
    self.lambdas = tf.pack(lambdas_arr, axis=1)
    # [batch_size, K, maze_size]
    self.lambdas = tf.reshape(self.lambdas, [-1, self.max_depth, self.maze_size])

    # [batch_size, (K + 1) * maze_size]
    self.values = tf.pack(values_arr, axis=1)
    # [batch_size, K + 1, maze_size]
    self.values = tf.reshape(self.values, [-1, (self.max_depth + 1), self.maze_size])

    self.build_preturns()
    self.build_lambda_preturns()
def _inference(self, docs, queries):
    """Computes document attentions given a document batch and query batch."""
    with tf.name_scope("inference"):
        # Compute document lengths / query lengths for batch
        doc_lens = length(docs)
        query_lens = length(queries)
        batch_size = tf.shape(docs)[0]

        with tf.variable_scope('encode'):
            # Encode Document / Query
            with tf.variable_scope('docs'), tf.device('/gpu:0'):
                encoded_docs = tf.nn.dropout(self._embed(docs), self._keep_prob)
                encoded_docs = self._bidirectional_encode(encoded_docs, doc_lens, self._encode_size)
            with tf.variable_scope('queries'), tf.device('/gpu:1'):
                encoded_queries = tf.nn.dropout(self._embed(queries), self._keep_prob)
                encoded_queries = self._bidirectional_encode(encoded_queries, query_lens, self._encode_size)

        with tf.variable_scope('attend') as scope:
            infer_gru = tf.nn.rnn_cell.GRUCell(self._infer_size)
            infer_state = infer_gru.zero_state(batch_size, tf.float32)
            for iter_step in range(self._num_glimpses):
                if iter_step > 0:
                    scope.reuse_variables()

                # Glimpse query and document
                with tf.device('/gpu:0'):
                    q_attention, q_glimpse = self._glimpse(self._A_q, self._a_q, encoded_queries, infer_state)
                    tf.add_to_collection('query_attentions', q_attention)
                with tf.device('/gpu:1'):
                    d_attention, d_glimpse = self._glimpse(self._A_d, self._a_d, encoded_docs,
                                                           tf.concat_v2([infer_state, q_glimpse], 1))
                    tf.add_to_collection('doc_attentions', d_attention)

                # Search Gates
                gate_concat = tf.concat_v2([infer_state, q_glimpse, d_glimpse, q_glimpse * d_glimpse], 1)
                r_d = tf.sigmoid(tf.matmul(gate_concat, self._g_d))
                r_d = tf.nn.dropout(r_d, self._keep_prob)
                r_q = tf.sigmoid(tf.matmul(gate_concat, self._g_q))
                r_q = tf.nn.dropout(r_q, self._keep_prob)

                combined_gated_glimpse = tf.concat_v2([r_q * q_glimpse, r_d * d_glimpse], 1)
                _, infer_state = infer_gru(combined_gated_glimpse, infer_state)

    return tf.to_float(tf.sign(tf.abs(docs))) * d_attention