The following 50 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.transpose().
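Before the project examples, a minimal usage sketch (not drawn from any of the projects below): tf.transpose() reorders the axes of a tensor according to perm; when perm is omitted, all axes are reversed.

import tensorflow as tf

x = tf.ones([2, 3, 4])               # shape [2, 3, 4]
y = tf.transpose(x, perm=[1, 0, 2])  # shape [3, 2, 4]: swap the first two axes
z = tf.transpose(x)                  # perm defaults to [2, 1, 0]: shape [4, 3, 2]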
def inference(self):
    """main computation graph here: 1. embedding layer, 2. Bi-LSTM layer, 3. concat, 4. FC layer, 5. softmax"""
    # 1. get embedding of words in the sentence
    self.embedded_words = tf.nn.embedding_lookup(self.Embedding, self.input_x)  # shape: [None, sentence_length, embed_size]
    # 2. Bi-LSTM layer
    # define lstm cells: get lstm cell output
    lstm_fw_cell = rnn.BasicLSTMCell(self.hidden_size)  # forward direction cell
    lstm_bw_cell = rnn.BasicLSTMCell(self.hidden_size)  # backward direction cell
    if self.dropout_keep_prob is not None:
        lstm_fw_cell = rnn.DropoutWrapper(lstm_fw_cell, output_keep_prob=self.dropout_keep_prob)
        lstm_bw_cell = rnn.DropoutWrapper(lstm_bw_cell, output_keep_prob=self.dropout_keep_prob)
    # bidirectional_dynamic_rnn: input: [batch_size, max_time, input_size]
    #                            output: a tuple (outputs, output_states)
    # where outputs is a tuple (output_fw, output_bw) containing the forward and the backward rnn output `Tensor`s.
    outputs, _ = tf.nn.bidirectional_dynamic_rnn(lstm_fw_cell, lstm_bw_cell, self.embedded_words, dtype=tf.float32)  # [batch_size, sequence_length, hidden_size]; creates a dynamic bidirectional recurrent neural network
    print("outputs:===>", outputs)  # outputs: (<tf.Tensor 'bidirectional_rnn/fw/fw/transpose:0' shape=(?, 5, 100) dtype=float32>, <tf.Tensor 'ReverseV2:0' shape=(?, 5, 100) dtype=float32>)
    # 3. concat output
    output_rnn = tf.concat(outputs, axis=2)  # [batch_size, sequence_length, hidden_size*2]
    self.output_rnn_last = tf.reduce_mean(output_rnn, axis=1)  # [batch_size, hidden_size*2]
    # output_rnn_last = output_rnn[:, -1, :]  # [batch_size, hidden_size*2]  # TODO
    print("output_rnn_last:", self.output_rnn_last)  # <tf.Tensor 'strided_slice:0' shape=(?, 200) dtype=float32>
    # 4. logits (use linear layer)
    with tf.name_scope("output"):
        # inputs: A `Tensor` of shape `[batch_size, dim]`. The forward activations of the input network.
        logits = tf.matmul(self.output_rnn_last, self.W_projection) + self.b_projection  # [batch_size, num_classes]
    return logits
def feed_network(self, data, keep_prob, chunk_size, n_chunks, dynamic):
    # This code is copied from tflearn
    sequence_lengths = None
    if dynamic:
        sequence_lengths = net.calc_seqlenth(data if isinstance(data, tf.Tensor) else tf.stack(data))
    batch_size = tf.shape(data)[0]
    weight_dropout = tf.nn.dropout(self._layer_weights, keep_prob)
    rnn_dropout = rnn.core_rnn_cell.DropoutWrapper(self._gru_cell, output_keep_prob=keep_prob)

    # Calculation Begin
    input_shape = data.get_shape().as_list()
    ndim = len(input_shape)
    axis = [1, 0] + list(range(2, ndim))
    data = tf.transpose(data, axis)
    sequence = tf.unstack(data)
    outputs, states = rnn.static_rnn(rnn_dropout, sequence, dtype=tf.float32, sequence_length=sequence_lengths)
    if dynamic:
        outputs = tf.transpose(tf.stack(outputs), [1, 0, 2])
        output = net.advanced_indexing_op(outputs, sequence_lengths)
    else:
        output = outputs[-1]
    output = tf.add(tf.matmul(output, weight_dropout), self._layer_biases)
    return output
def forward(self, x):
    length = lambda mx: int(mx.get_shape()[0])
    with tf.variable_scope("QRNN/Forward"):
        if self.c is None:
            # init context cell
            self.c = tf.zeros([length(x), self.kernel.size], dtype=tf.float32)
        if self.conv_size <= 2:
            # x is batch_size x sentence_length x word_length
            # -> now, transpose it to sentence_length x batch_size x word_length
            _x = tf.transpose(x, [1, 0, 2])
            for i in range(length(_x)):
                t = _x[i]  # t is batch_size x word_length matrix
                f, z, o = self.kernel.forward(t)
                self._step(f, z, o)
        else:
            c_f, c_z, c_o = self.kernel.conv(x)
            for i in range(length(c_f)):
                f, z, o = c_f[i], c_z[i], c_o[i]
                self._step(f, z, o)
    return self.h
def baseline_forward(self, X, size, n_class):
    shape = X.get_shape()
    _X = tf.transpose(X, [1, 0, 2])  # batch_size x sentence_length x word_length -> sentence_length x batch_size x word_length
    _X = tf.reshape(_X, [-1, int(shape[2])])  # (sentence_length x batch_size) x word_length
    seq = tf.split(0, int(shape[1]), _X)  # sentence_length x (batch_size x word_length)

    with tf.name_scope("LSTM"):
        lstm_cell = rnn_cell.BasicLSTMCell(size, forget_bias=1.0)
        outputs, states = rnn.rnn(lstm_cell, seq, dtype=tf.float32)

    with tf.name_scope("LSTM-Classifier"):
        W = tf.Variable(tf.random_normal([size, n_class]), name="W")
        b = tf.Variable(tf.random_normal([n_class]), name="b")
        output = tf.matmul(outputs[-1], W) + b

    return output
def cross_entropy_sequence_loss(logits, targets, sequence_length):
    """Calculates the per-example cross-entropy loss for a sequence of logits and
    masks out all losses past the sequence length.

    Args:
      logits: Logits of shape `[T, B, vocab_size]`
      targets: Target classes of shape `[T, B]`
      sequence_length: An int32 tensor of shape `[B]` corresponding to the length
        of each input

    Returns:
      A tensor of shape [T, B] that contains the loss per example, per time step.
    """
    with tf.name_scope("cross_entropy_sequence_loss"):
        losses = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=logits, labels=targets)

        # Mask out the losses we don't care about
        loss_mask = tf.sequence_mask(
            tf.to_int32(sequence_length), tf.to_int32(tf.shape(targets)[0]))
        losses = losses * tf.transpose(tf.to_float(loss_mask), [1, 0])

        return losses
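A hedged shape sketch of calling the helper above; the tensors here are invented placeholders for illustration:

logits = tf.zeros([10, 4, 1000])              # [T, B, vocab_size]
targets = tf.zeros([10, 4], dtype=tf.int32)   # [T, B]
lengths = tf.constant([10, 7, 3, 9])          # [B]
losses = cross_entropy_sequence_loss(logits, targets, lengths)  # [T, B], zero past each length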
def compute_loss(self, decoder_output, _features, labels):
    """Computes the loss for this model.

    Returns a tuple `(losses, loss)`, where `losses` are the per-batch
    losses and loss is a single scalar tensor to minimize.
    """
    #pylint: disable=R0201
    # Calculate loss per example-timestep of shape [B, T]
    losses = seq2seq_losses.cross_entropy_sequence_loss(
        logits=decoder_output.logits[:, :, :],
        targets=tf.transpose(labels["target_ids"][:, 1:], [1, 0]),
        sequence_length=labels["target_len"] - 1)

    # Calculate the average log perplexity
    loss = tf.reduce_sum(losses) / tf.to_float(
        tf.reduce_sum(labels["target_len"] - 1))

    return losses, loss
def get_loss(pred, label, end_points, reg_weight=0.001):
    """ pred: B*NUM_CLASSES,
        label: B, """
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=label)
    classify_loss = tf.reduce_mean(loss)
    tf.summary.scalar('classify loss', classify_loss)

    # Enforce the transformation as orthogonal matrix
    transform = end_points['transform']  # BxKxK
    K = transform.get_shape()[1].value
    mat_diff = tf.matmul(transform, tf.transpose(transform, perm=[0, 2, 1]))
    mat_diff -= tf.constant(np.eye(K), dtype=tf.float32)
    mat_diff_loss = tf.nn.l2_loss(mat_diff)
    tf.summary.scalar('mat loss', mat_diff_loss)

    return classify_loss + mat_diff_loss * reg_weight
def get_loss(pred, label, end_points, reg_weight=0.001):
    """ pred: BxNxC,
        label: BxN, """
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=label)
    classify_loss = tf.reduce_mean(loss)
    tf.scalar_summary('classify loss', classify_loss)

    # Enforce the transformation as orthogonal matrix
    transform = end_points['transform']  # BxKxK
    K = transform.get_shape()[1].value
    mat_diff = tf.matmul(transform, tf.transpose(transform, perm=[0, 2, 1]))
    mat_diff -= tf.constant(np.eye(K), dtype=tf.float32)
    mat_diff_loss = tf.nn.l2_loss(mat_diff)
    tf.scalar_summary('mat_loss', mat_diff_loss)

    return classify_loss + mat_diff_loss * reg_weight
def get_loss(l_pred, seg_pred, label, seg, weight, end_points):
    per_instance_label_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=l_pred, labels=label)
    label_loss = tf.reduce_mean(per_instance_label_loss)

    # size of seg_pred is batch_size x point_num x part_cat_num
    # size of seg is batch_size x point_num
    per_instance_seg_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=seg_pred, labels=seg), axis=1)
    seg_loss = tf.reduce_mean(per_instance_seg_loss)

    per_instance_seg_pred_res = tf.argmax(seg_pred, 2)

    # Enforce the transformation as orthogonal matrix
    transform = end_points['transform']  # BxKxK
    K = transform.get_shape()[1].value
    mat_diff = tf.matmul(transform, tf.transpose(transform, perm=[0, 2, 1])) - tf.constant(np.eye(K), dtype=tf.float32)
    mat_diff_loss = tf.nn.l2_loss(mat_diff)

    total_loss = weight * seg_loss + (1 - weight) * label_loss + mat_diff_loss * 1e-3

    return total_loss, label_loss, per_instance_label_loss, seg_loss, per_instance_seg_loss, per_instance_seg_pred_res
def __init__(self, embedding):
    self.sess = tf.Session()
    self.inputs = tf.placeholder(tf.float32, [None, embedding.shape[1]], name='inputs')
    self.test_vec = tf.placeholder(tf.float32, [1, embedding.shape[1]], name='test_vec')
    self.cos_distance = tf.matmul(self.inputs, tf.transpose(self.test_vec))

    #-----------------------------------------------------------------------
    # Compute normalized embedding matrix
    #-----------------------------------------------------------------------
    row_sum = tf.reduce_sum(tf.square(self.inputs), axis=1, keep_dims=True)
    norm = tf.sqrt(row_sum)
    self.normalized = self.inputs / norm
    self.embedding = self.sess.run(self.normalized, feed_dict={self.inputs: embedding})

#---------------------------------------------------------------------------
def make_png_thumbnail(x, n):
    '''
    Input:
        `x`: Tensor, value range=[-1, 1), shape=[n*n, h, w, c]
        `n`: sqrt of the number of images
    Return:
        `tf.string` (bytes) of the PNG
        (write these bytes directly into a file)
    '''
    with tf.name_scope('MakeThumbnail'):
        _, h, w, c = x.get_shape().as_list()
        x = tf.reshape(x, [n, n, h, w, c])
        x = tf.transpose(x, [0, 2, 1, 3, 4])
        x = tf.reshape(x, [n * h, n * w, c])
        x = x / 2. + .5
        x = tf.image.convert_image_dtype(x, tf.uint8, saturate=True)
        x = tf.image.encode_png(x)
        return x
def make_png_jet_thumbnail(x, n):
    '''
    Input:
        `x`: Tensor, value range=[-1, 1), shape=[n*n, h, w, c]
        `n`: sqrt of the number of images
    Return:
        `tf.string` (bytes) of the PNG
        (write these bytes directly into a file)
    '''
    with tf.name_scope('MakeThumbnail'):
        _, h, w, c = x.get_shape().as_list()
        x = tf.reshape(x, [n, n, h, w, c])
        x = tf.transpose(x, [0, 2, 1, 3, 4])
        x = tf.reshape(x, [n * h, n * w, c])
        x = x / 2. + .5
        x = gray2jet(x)
        x = tf.image.convert_image_dtype(x, tf.uint8, saturate=True)
        x = tf.image.encode_png(x)
        return x
def repeat(tensor: tf.Tensor, repeats: int, axis: int) -> tf.Tensor:
    """
    Repeat elements of the input tensor in the specified axis ``repeats``-times.

    .. note::
        Chaining of this op may produce TF warnings although the performance seems to be unaffected.

    :param tensor: TF tensor to be repeated
    :param repeats: number of repeats
    :param axis: axis to repeat
    :return: tensor with repeated elements
    """
    shape = tensor.get_shape().as_list()

    dims = np.arange(len(tensor.shape))
    prepare_perm = np.hstack(([axis], np.delete(dims, axis)))
    restore_perm = np.hstack((dims[1:axis+1], [0], dims[axis+1:]))

    indices = tf.cast(tf.floor(tf.range(0, shape[axis]*repeats)/tf.constant(repeats)), 'int32')

    shuffled = tf.transpose(tensor, prepare_perm)
    repeated = tf.gather(shuffled, indices)
    return tf.transpose(repeated, restore_perm)
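A hedged check of what the helper computes (the constant below is invented for illustration); it behaves like np.repeat along the chosen axis:

x = tf.constant([[1, 2], [3, 4]])  # shape [2, 2]
r = repeat(x, repeats=3, axis=1)   # shape [2, 6]: [[1, 1, 1, 2, 2, 2], [3, 3, 3, 4, 4, 4]]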
def get_image_summary(img, idx=0):
    """
    Make an image summary for 4d tensor image with index idx
    """
    V = tf.slice(img, (0, 0, 0, idx), (1, -1, -1, 1))
    V -= tf.reduce_min(V)
    V /= tf.reduce_max(V)
    V *= 255

    img_w = tf.shape(img)[1]
    img_h = tf.shape(img)[2]
    V = tf.reshape(V, tf.stack((img_w, img_h, 1)))
    V = tf.transpose(V, (2, 0, 1))
    V = tf.reshape(V, tf.stack((-1, img_w, img_h, 1)))
    return V
def recode(self, inputs, skips=None, store=False, **kwargs):
    """
    Wrapper around seq_update() to fit recode() signature of parent class.
    """
    state = self.get_state()
    if 'hidden' in state:
        outputs, states = \
            self.seq_update(inputs, state['hidden'], skips=skips,
                            outputs=[state['output']], states=[],
                            as_list=True, store=store)
    else:
        outputs, states = \
            self.seq_update(inputs, None, skips=skips,
                            outputs=[inputs[:, :, 0]], states=[],
                            as_list=True, store=store)
    return tf.transpose(tf.stack(outputs[:-1]), perm=[1, 2, 0])
def loss_nce(self, l2_lambda=0.0001):  # 0.0001-->0.001
    """calculate loss using (NCE)cross entropy here"""
    # Compute the average NCE loss for the batch.
    # tf.nce_loss automatically draws a new sample of the negative labels each
    # time we evaluate the loss.
    if self.is_training:  # training
        # labels = tf.reshape(self.input_y, [-1])  # [batch_size,1]------>[batch_size,]
        labels = tf.expand_dims(self.input_y, 1)  # [batch_size,]----->[batch_size,1]
        loss = tf.reduce_mean(
            # inputs: A `Tensor` of shape `[batch_size, dim]`. The forward activations of the input network.
            tf.nn.nce_loss(weights=tf.transpose(self.W_projection),  # [hidden_size*2, num_classes]--->[num_classes, hidden_size*2]. nce_weights: A `Tensor` of shape `[num_classes, dim]`. O.K.
                           biases=self.b_projection,      # [label_size]. nce_biases: A `Tensor` of shape `[num_classes]`.
                           labels=labels,                 # [batch_size, 1]. train_labels: A `Tensor` of type `int64` and shape `[batch_size, num_true]`. The target classes.
                           inputs=self.output_rnn_last,   # [batch_size, hidden_size*2]. A `Tensor` of shape `[batch_size, dim]`. The forward activations of the input network.
                           num_sampled=self.num_sampled,  # scalar. 100
                           num_classes=self.num_classes,  # scalar. 1999
                           partition_strategy="div"))
    l2_losses = tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables() if 'bias' not in v.name]) * l2_lambda
    loss = loss + l2_losses
    return loss
def parseNet(self, net, netstruct, istraining=True):
    for key in netstruct:
        if key[0] == "conv":
            net = self.conv3d(net, key[2], key[1], key[3], key[4])
        elif key[0] == "fc":
            net = self.fc(net, key[2], key[1], key[3], key[4], activation=key[-1])
        elif key[0] == "maxpool":
            net = tf.nn.max_pool3d(net, ksize=key[2], strides=key[2], padding="SAME", name=key[1])
        elif key[0] == "dropout" and istraining:
            net = tf.nn.dropout(net, key[2], name=key[1])
        elif key[0] == "reshape":
            net = tf.reshape(net, key[-1])
        elif key[0] == "softmax":
            net = tf.nn.softmax(net)
        elif key[0] == "transpose":
            net = tf.transpose(net, perm=key[-1])
    return net
def _rnn_attention_decoder(self, decoder_cell, training_wheels):
    loop_fn = self._custom_rnn_loop_fn(decoder_cell.output_size, training_wheels=training_wheels)
    decoder_outputs, _, (context_vectors_array, attention_logits_array, pointer_probability_array) = \
        tf.nn.raw_rnn(decoder_cell, loop_fn, swap_memory=True)
    decoder_outputs = decoder_outputs.stack()
    decoder_outputs = tf.transpose(decoder_outputs, [1, 0, 2])

    attention_logits = attention_logits_array.gather(tf.range(0, attention_logits_array.size() - 1))
    attention_logits = tf.transpose(attention_logits, [1, 0, 2])

    context_vectors = context_vectors_array.gather(tf.range(0, context_vectors_array.size() - 1))
    context_vectors = tf.transpose(context_vectors, [1, 0, 2])

    pointer_probabilities = pointer_probability_array.gather(tf.range(0, pointer_probability_array.size() - 1))
    pointer_probabilities = tf.transpose(pointer_probabilities, [1, 0])

    return decoder_outputs, context_vectors, attention_logits, pointer_probabilities
def _score(self, prev_decoder_state, prev_embedding):
    # Returns scores in a tensor of shape [batch_size, input_sequence_length]

    if self.mode == 'decode':
        query_part = self.query_attention_partial_score_placeholder
        encoder_part = self.encoder_state_attention_partial_scores_placeholder
    else:
        query_part = self.query_attention_partial_score
        encoder_part = self.encoder_state_attention_partial_scores

    embedding_part = tf.matmul(prev_embedding, self.attention_w_e)

    output = tf.matmul(prev_decoder_state, self.attention_w) + embedding_part + query_part + encoder_part + self.attention_b
    output = tf.tanh(output)
    output = tf.reduce_sum(self.attention_v * output, axis=2)
    output = tf.transpose(output, [1, 0])

    # Handle input document padding by giving a large penalty, eliminating it from the weighted average
    padding_penalty = -1e20 * tf.to_float(1 - tf.sign(self.documents_placeholder))
    masked = output + padding_penalty

    return masked
def _bbox_transform(self, ex_rois, gt_rois):
    ex_widths = ex_rois[:, 2] - ex_rois[:, 0] + 1.0
    ex_heights = ex_rois[:, 3] - ex_rois[:, 1] + 1.0
    ex_ctr_x = ex_rois[:, 0] + 0.5 * ex_widths
    ex_ctr_y = ex_rois[:, 1] + 0.5 * ex_heights

    gt_widths = gt_rois[:, 2] - gt_rois[:, 0] + 1.0
    gt_heights = gt_rois[:, 3] - gt_rois[:, 1] + 1.0
    gt_ctr_x = gt_rois[:, 0] + 0.5 * gt_widths
    gt_ctr_y = gt_rois[:, 1] + 0.5 * gt_heights

    targets_dx = (gt_ctr_x - ex_ctr_x) / ex_widths
    targets_dy = (gt_ctr_y - ex_ctr_y) / ex_heights
    targets_dw = tf.log(gt_widths / ex_widths)
    targets_dh = tf.log(gt_heights / ex_heights)

    targets = tf.transpose(tf.pack(
        (targets_dx, targets_dy, targets_dw, targets_dh),
        axis=0
    ))
    return targets
def _cumprod(tensor, axis=0):
    """A custom version of cumprod to prevent NaN gradients when there are zeros
    in `tensor` as reported here: https://github.com/tensorflow/tensorflow/issues/3862

    :param tensor: tf.Tensor
    :return: tf.Tensor
    """
    transpose_permutation = None
    n_dim = len(tensor.get_shape())
    if n_dim > 1 and axis != 0:
        if axis < 0:
            axis += n_dim

        transpose_permutation = np.arange(n_dim)
        # swap `axis` to the front so tf.scan runs along it
        transpose_permutation[0], transpose_permutation[axis] = axis, 0

        tensor = tf.transpose(tensor, transpose_permutation)

    def prod(acc, x):
        return acc * x

    prob = tf.scan(prod, tensor)

    # a swap permutation is its own inverse, so the same perm restores the layout
    if transpose_permutation is not None:
        prob = tf.transpose(prob, transpose_permutation)
    return prob
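A hedged sanity check (the constant is invented): the values match tf.cumprod, while the scan-based form avoids the NaN-gradient issue at zeros that motivates the helper:

t = tf.constant([[0.5, 0.0], [2.0, 3.0]])
a = _cumprod(t, axis=0)  # [[0.5, 0.0], [1.0, 0.0]], same values as tf.cumprod(t, axis=0)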
def __init__(self, attention_units, memory, sequence_length=None, time_major=True, mode=0):
    self.attention_units = attention_units
    self.enc_units = memory.get_shape()[-1].value

    if time_major:
        memory = tf.transpose(memory, perm=(1, 0, 2))

    self.enc_length = tf.shape(memory)[1]
    self.batch_size = tf.shape(memory)[0]
    self.mode = mode
    self.mask = array_ops.sequence_mask(sequence_length, self.enc_length, tf.float32) if sequence_length is not None else None

    self.memory = tf.reshape(memory, (tf.shape(memory)[0], self.enc_length, 1, self.enc_units))

    # pre-compute Uahj to minimize the computational cost
    with tf.variable_scope('attention'):
        Ua = tf.get_variable(name='Ua', shape=(1, 1, self.enc_units, self.attention_units))
    self.hidden_feats = tf.nn.conv2d(self.memory, Ua, [1, 1, 1, 1], "SAME")
def __init__(self, attention_units, memory, sequence_length=None, time_major=True, mode=0):
    self.attention_units = attention_units
    self.enc_units = memory.get_shape()[-1].value

    if time_major:
        memory = tf.transpose(memory, perm=(1, 0, 2))

    self.enc_length = tf.shape(memory)[1]
    self.batch_size = tf.shape(memory)[0]
    self.mode = mode
    self.mask = array_ops.sequence_mask(sequence_length, self.enc_length) if sequence_length is not None else None
    self.tiny = -math.inf * tf.ones(shape=(self.batch_size, self.enc_length))

    self.memory = tf.reshape(memory, (tf.shape(memory)[0], self.enc_length, 1, self.enc_units))

    ### pre-compute Uahj to minimize the computational cost
    with tf.variable_scope('attention'):
        Ua = tf.get_variable(name='Ua', shape=(1, 1, self.enc_units, self.attention_units))
    self.hidden_feats = tf.nn.conv2d(self.memory, Ua, [1, 1, 1, 1], "SAME")
def __init__(self, attention_units, memory, time_major=True):
    self.attention_units = attention_units
    self.enc_units = memory.get_shape()[-1].value

    if time_major:
        memory = tf.transpose(memory, perm=(1, 0, 2))

    self.enc_length = tf.shape(memory)[1]
    self.batch_size = tf.shape(memory)[0]

    self.memory = tf.reshape(memory, (tf.shape(memory)[0], self.enc_length, 1, self.enc_units))

    # pre-compute Uahj to minimize the computational cost
    with tf.variable_scope('attention'):
        Ua = tf.get_variable(name='Ua',
                             shape=(1, 1, self.enc_units, self.attention_units),
                             initializer=gaussian_initializer(mean=0.0, std=0.001))
    self.hidden_feats = tf.nn.conv2d(self.memory, Ua, [1, 1, 1, 1], "SAME")
def sampled_softmax_loss(label, logit, projection, num_sampled):
    """
    Args:
        label:
        logit: unscaled log probabilities
        projection: (W, b)
        num_sampled:
    """
    local_label = tf.reshape(label, shape=(-1, 1))
    local_logit = tf.reshape(logit, shape=(-1, logit.get_shape()[-1].value))
    local_Wt = tf.transpose(projection[0], perm=(1, 0))
    local_b = projection[1]
    loss_sum = tf.nn.sampled_softmax_loss(weights=local_Wt, biases=local_b,
                                          labels=local_label, inputs=local_logit,
                                          num_sampled=num_sampled,
                                          num_classes=local_Wt.get_shape()[0].value)
    loss = tf.divide(tf.reduce_sum(loss_sum), tf.cast(tf.size(local_label), dtype=tf.float32))
    return loss
def forward(self, z):
    if not self.ar:
        mu, log_sigma = self._get_mu_and_sigma(z)
    else:
        # permute z
        z = tf.reshape(z, [-1] + [1] * self.hps.z_size)
        perm = np.random.permutation(self.hps.z_size) + 1
        z = tf.transpose(z, np.append([0], perm))
        z = tf.reshape(z, [-1, self.hps.z_size])
        mu, log_sigma = ar_layer(z, self.hps, n_hidden=self.n_hidden)
    log_sigma = tf.clip_by_value(log_sigma, -5, 5)
    if not self.hps.ignore_sigma_flow:
        y = z * tf.exp(log_sigma) + mu
        log_det = -1 * log_sigma
    else:
        y = z + mu
        log_det = 0.0
    return y, log_det
def _meshgrid(self, height, width):
    with tf.variable_scope('_meshgrid'):
        # This should be equivalent to:
        #  x_t, y_t = np.meshgrid(np.linspace(-1, 1, width),
        #                         np.linspace(-1, 1, height))
        #  ones = np.ones(np.prod(x_t.shape))
        #  grid = np.vstack([x_t.flatten(), y_t.flatten(), ones])
        x_t = tf.matmul(tf.ones(shape=tf.pack([height, 1])),
                        tf.transpose(tf.expand_dims(tf.linspace(-1.0, 1.0, width), 1), [1, 0]))
        y_t = tf.matmul(tf.expand_dims(tf.linspace(-1.0, 1.0, height), 1),
                        tf.ones(shape=tf.pack([1, width])))

        x_t_flat = tf.reshape(x_t, (1, -1))
        y_t_flat = tf.reshape(y_t, (1, -1))

        ones = tf.ones_like(x_t_flat)
        grid = tf.concat(0, [x_t_flat, y_t_flat, ones])
        return grid
def minibatch_discrimination(input_layer, num_kernels, dim_per_kernel=5, name='minibatch_discrim'):
    # batch_size = input_layer.shape[0]
    # num_features = input_layer.shape[1]
    batch_size = input_layer.get_shape().as_list()[0]
    num_features = input_layer.get_shape().as_list()[1]
    W = tf.get_variable('W', [num_features, num_kernels * dim_per_kernel],
                        initializer=tf.contrib.layers.xavier_initializer())
    b = tf.get_variable('b', [num_kernels], initializer=tf.constant_initializer(0.0))
    activation = tf.matmul(input_layer, W)
    activation = tf.reshape(activation, [batch_size, num_kernels, dim_per_kernel])
    tmp1 = tf.expand_dims(activation, 3)
    tmp2 = tf.transpose(activation, perm=[1, 2, 0])
    tmp2 = tf.expand_dims(tmp2, 0)
    abs_diff = tf.reduce_sum(tf.abs(tmp1 - tmp2), reduction_indices=[2])
    f = tf.reduce_sum(tf.exp(-abs_diff), reduction_indices=[2])
    f = f + b
    return f
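A hedged shape walk-through of the function above (the input is invented): each example gets one closeness score per kernel, computed against the rest of the batch:

x = tf.zeros([16, 64])                               # [batch, features]; batch size must be static
feats = minibatch_discrimination(x, num_kernels=32)  # [16, 32], typically concatenated onto the features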
def feed_network(self, data, keep_prob, chunk_size, n_chunks, dynamic):
    # This code is copied from tflearn
    sequence_lengths = None
    if dynamic:
        sequence_lengths = net.calc_seqlenth(data if isinstance(data, tf.Tensor) else tf.stack(data))
    batch_size = tf.shape(data)[0]
    weight_dropout = tf.nn.dropout(self._layer_weights, keep_prob)
    rnn_dropout = rnn.core_rnn_cell.DropoutWrapper(self._lstm_cell, output_keep_prob=keep_prob)

    # Calculation Begin
    input_shape = data.get_shape().as_list()
    ndim = len(input_shape)
    axis = [1, 0] + list(range(2, ndim))
    data = tf.transpose(data, axis)
    sequence = tf.unstack(data)
    outputs, states = rnn.static_rnn(rnn_dropout, sequence, dtype=tf.float32, sequence_length=sequence_lengths)
    if dynamic:
        outputs = tf.transpose(tf.stack(outputs), [1, 0, 2])
        output = net.advanced_indexing_op(outputs, sequence_lengths)
    else:
        output = outputs[-1]
    output = tf.add(tf.matmul(output, weight_dropout), self._layer_biases)
    return output
def _embed_sentences(self):
    """Tensorflow implementation of Simple but Tough-to-Beat Baseline"""
    # Get word features
    word_embeddings = self._get_embedding()
    word_feats = tf.nn.embedding_lookup(word_embeddings, self.input)
    # Get marginal estimates and scaling term
    batch_size = tf.shape(word_feats)[0]
    a = tf.pow(10.0, self._get_a_exp())
    p = tf.constant(self.marginals, dtype=tf.float32, name='marginals')
    q = tf.reshape(
        a / (a + tf.nn.embedding_lookup(p, self.input)),
        (batch_size, self.mx_len, 1)
    )
    # Compute initial sentence embedding
    z = tf.reshape(1.0 / tf.to_float(self.input_lengths), (batch_size, 1))
    S = z * tf.reduce_sum(q * word_feats, axis=1)
    # Compute common component
    S_centered = S - tf.reduce_mean(S, axis=0)
    _, _, V = tf.svd(S_centered, full_matrices=False, compute_uv=True)
    self.tf_ccx = tf.stop_gradient(tf.gather(tf.transpose(V), 0))
    # Common component removal
    ccx = tf.reshape(self._get_common_component(), (1, self.d))
    sv = {'embeddings': word_embeddings, 'a': a, 'p': p, 'ccx': ccx}
    return S - tf.matmul(S, ccx * tf.transpose(ccx)), sv
def rnn_layers(features, sequence_length, num_classes):
    """Build a stack of RNN layers from input features"""

    # Input features is [batchSize paddedSeqLen numFeatures]
    logit_activation = tf.nn.relu
    weight_initializer = tf.contrib.layers.variance_scaling_initializer()
    bias_initializer = tf.constant_initializer(value=0.0)

    with tf.variable_scope("rnn"):
        # Transpose to time-major order for efficiency
        rnn_sequence = tf.transpose(features, perm=[1, 0, 2], name='time_major')
        rnn1 = rnn_layer(rnn_sequence, sequence_length, rnn_size, 'bdrnn1')
        rnn2 = rnn_layer(rnn1, sequence_length, rnn_size, 'bdrnn2')
        rnn_logits = tf.layers.dense(rnn2, num_classes + 1,
                                     activation=logit_activation,
                                     kernel_initializer=weight_initializer,
                                     bias_initializer=bias_initializer,
                                     name='logits')
        return rnn_logits
def channel_wise_fc_layer(self, input, name):  # bottom: (7x7x512)
    _, width, height, n_feat_map = input.get_shape().as_list()
    input_reshape = tf.reshape(input, [-1, width * height, n_feat_map])
    input_transpose = tf.transpose(input_reshape, [2, 0, 1])

    with tf.variable_scope(name):
        W = tf.get_variable(
            "W",
            shape=[n_feat_map, width * height, width * height],  # (512, 49, 49)
            initializer=tf.random_normal_initializer(0., 0.005))
        output = tf.batch_matmul(input_transpose, W)

    output_transpose = tf.transpose(output, [1, 2, 0])
    output_reshape = tf.reshape(output_transpose, [-1, height, width, n_feat_map])

    return output_reshape
def fused_birnn(fused_rnn, inputs, sequence_length, initial_state=(None, None), dtype=None,
                scope=None, time_major=False, backward_device=None):
    with tf.variable_scope(scope or "BiRNN"):
        sequence_length = tf.cast(sequence_length, tf.int32)
        if not time_major:
            inputs = tf.transpose(inputs, [1, 0, 2])
        outputs_fw, state_fw = fused_rnn(inputs, sequence_length=sequence_length,
                                         initial_state=initial_state[0], dtype=dtype, scope="FW")

        if backward_device is not None:
            with tf.device(backward_device):
                outputs_bw, state_bw = fused_rnn_backward(fused_rnn, inputs, sequence_length,
                                                          initial_state[1], dtype, scope="BW")
        else:
            outputs_bw, state_bw = fused_rnn_backward(fused_rnn, inputs, sequence_length,
                                                      initial_state[1], dtype, scope="BW")

    if not time_major:
        outputs_fw = tf.transpose(outputs_fw, [1, 0, 2])
        outputs_bw = tf.transpose(outputs_bw, [1, 0, 2])

    return (outputs_fw, outputs_bw), (state_fw, state_bw)
def __call__(self, u_t, a, b, scope=None):
    """
    :param u_t: [N, M, d]
    :param a: [N, M, d]
    :param b: [N, M, d]
    :param mask: [N, M]
    :return:
    """
    N, M, d = self.batch_size, self.mem_size, self.hidden_size
    L, sL = self.L, self.sL
    with tf.name_scope(scope or self.__class__.__name__):
        L = tf.tile(tf.expand_dims(tf.expand_dims(L, 0), 0), [N, d, 1, 1])
        sL = tf.tile(tf.expand_dims(tf.expand_dims(sL, 0), 0), [N, d, 1, 1])
        logb = tf.log(b + 1e-9)  # [N, M, d]
        logb = tf.concat(1, [tf.zeros([N, 1, d]), tf.slice(logb, [0, 1, 0], [-1, -1, -1])])  # [N, M, d]
        logb = tf.expand_dims(tf.transpose(logb, [0, 2, 1]), -1)  # [N, d, M, 1]
        left = L * tf.exp(tf.batch_matmul(L, logb * sL))  # [N, d, M, M]
        right = a * u_t  # [N, M, d]
        right = tf.expand_dims(tf.transpose(right, [0, 2, 1]), -1)  # [N, d, M, 1]
        u = tf.batch_matmul(left, right)  # [N, d, M, 1]
        u = tf.transpose(tf.squeeze(u, [3]), [0, 2, 1])  # [N, M, d]
    return u
def dot(x, y):
    '''Multiplies 2 tensors.
    When attempting to multiply a ND tensor with a ND tensor,
    reproduces the Theano behavior
    (e.g. (2, 3).(4, 3, 5) = (2, 4, 5))
    '''
    if ndim(x) is not None and (ndim(x) > 2 or ndim(y) > 2):
        x_shape = (-1,) + int_shape(x)[1:]
        y_shape = int_shape(y)
        y_permute_dim = list(range(ndim(y)))
        y_permute_dim = [y_permute_dim.pop(-2)] + y_permute_dim
        xt = tf.reshape(x, [-1, x_shape[-1]])
        yt = tf.reshape(tf.transpose(y, perm=y_permute_dim), [y_shape[-2], -1])
        return tf.reshape(tf.matmul(xt, yt),
                          x_shape[:-1] + y_shape[:-2] + y_shape[-1:])
    if is_sparse(x):
        out = tf.sparse_tensor_dense_matmul(x, y)
    else:
        out = tf.matmul(x, y)
    return out
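The Theano-style contraction from the docstring, sketched with concrete shapes (the tensors are invented; ndim/int_shape are the backend helpers the snippet relies on):

a = tf.ones([2, 3])
b = tf.ones([4, 3, 5])
c = dot(a, b)  # shape [2, 4, 5]: contracts a's last axis with b's second-to-last axis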
def get_output_for(self, input, **kwargs):
    input_shape = tf.shape(input)
    n_batches = input_shape[0]
    n_steps = input_shape[1]
    input = tf.reshape(input, tf.pack([n_batches, n_steps, -1]))
    if 'recurrent_state' in kwargs and self in kwargs['recurrent_state']:
        h0s = kwargs['recurrent_state'][self]
    else:
        h0s = tf.tile(
            tf.reshape(self.h0, (1, self.num_units)),
            (n_batches, 1)
        )
    # flatten extra dimensions
    shuffled_input = tf.transpose(input, (1, 0, 2))
    hs = tf.scan(
        self.step,
        elems=shuffled_input,
        initializer=h0s
    )
    shuffled_hs = tf.transpose(hs, (1, 0, 2))
    if 'recurrent_state_output' in kwargs:
        kwargs['recurrent_state_output'][self] = shuffled_hs
    return shuffled_hs
def get_output_for(self, input, **kwargs):
    input_shape = tf.shape(input)
    n_batches = input_shape[0]
    n_steps = input_shape[1]
    input = tf.reshape(input, tf.pack([n_batches, n_steps, -1]))
    c0s = tf.tile(
        tf.reshape(self.c0, (1, self.num_units)),
        (n_batches, 1)
    )
    h0s = self.nonlinearity(c0s)
    # flatten extra dimensions
    shuffled_input = tf.transpose(input, (1, 0, 2))
    hcs = tf.scan(
        self.step,
        elems=shuffled_input,
        initializer=tf.concat(1, [h0s, c0s])
    )
    shuffled_hcs = tf.transpose(hcs, (1, 0, 2))
    shuffled_hs = shuffled_hcs[:, :, :self.num_units]
    shuffled_cs = shuffled_hcs[:, :, self.num_units:]
    return shuffled_hs
def _preprocess_conv2d_input(x, data_format):
    """Transpose and cast the input before the conv2d.

    # Arguments
        x: input tensor.
        data_format: string, `"channels_last"` or `"channels_first"`.

    # Returns
        A tensor.
    """
    if dtype(x) == 'float64':
        x = tf.cast(x, 'float32')
    if data_format == 'channels_first':
        # TF uses the last dimension as channel dimension,
        # instead of the 2nd one.
        # TH input shape: (samples, input_depth, rows, cols)
        # TF input shape: (samples, rows, cols, input_depth)
        x = tf.transpose(x, (0, 2, 3, 1))
    return x
def batch_to_time(x, block_size):
    """Inverse of `time_to_batch(x, block_size)`.

    Args:
      x: Tensor of shape [nb*block_size, k, n] for some natural number k.
      block_size: number of time steps (i.e. size of dimension 1) in the output
        tensor.

    Returns:
      Tensor of shape [nb, k*block_size, n].
    """
    shape = x.get_shape().as_list()
    y = tf.reshape(x, [shape[0] // block_size, block_size, shape[1], shape[2]])
    y = tf.transpose(y, [0, 2, 1, 3])
    y = tf.reshape(y, [shape[0] // block_size, shape[1] * block_size, shape[2]])
    y.set_shape([mul_or_none(shape[0], 1. / block_size),
                 mul_or_none(shape[1], block_size),
                 shape[2]])
    return y
def time_to_batch(x, dilation):
    with tf.name_scope('time_to_batch'):
        shape = x.get_shape().as_list()
        y = tf.reshape(x, [shape[0], shape[1] // dilation, dilation, shape[2]])
        y = tf.transpose(y, [0, 2, 1, 3])
        y = tf.reshape(y, [shape[0] * dilation, shape[1] // dilation, shape[2]])
        y.set_shape([mul_or_none(shape[0], dilation),
                     mul_or_none(shape[1], 1. / dilation),
                     shape[2]])
        return y

# tensorflow/magenta/blob/master/magenta/models/nsynth/wavenet/masked.py#L85
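The two helpers above are inverses of each other; a hedged round-trip sketch with invented static shapes:

x = tf.zeros([4, 8, 16])            # [batch, time, channels]; time divisible by dilation
y = time_to_batch(x, dilation=2)    # -> [8, 4, 16]: dilation-strided slices stacked onto batch
z = batch_to_time(y, block_size=2)  # -> [4, 8, 16]: recovers the original layout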
def __init__(self, training, cell, embedding, start_tokens, end_token, initial_state, beam_width,
             output_layer=None, gold_sequence=None, gold_sequence_length=None):
    self._training = training
    self._cell = cell
    self._output_layer = output_layer
    self._embedding_fn = lambda ids: tf.nn.embedding_lookup(embedding, ids)
    self._output_size = output_layer.units if output_layer is not None else self._cell.output_size
    self._batch_size = tf.size(start_tokens)
    self._beam_width = beam_width
    self._tiled_initial_cell_state = nest.map_structure(self._maybe_split_batch_beams, initial_state, self._cell.state_size)
    self._start_tokens = start_tokens
    self._tiled_start_tokens = self._maybe_tile_batch(start_tokens)
    self._end_token = end_token

    self._original_gold_sequence = gold_sequence
    self._gold_sequence = gold_sequence
    self._gold_sequence_length = gold_sequence_length
    if training:
        assert self._gold_sequence is not None
        assert self._gold_sequence_length is not None
        self._max_time = int(self._gold_sequence.shape[1])
        # transpose gold sequence to be time major and make it into a TensorArray
        self._gold_sequence = tf.TensorArray(dtype=tf.int32, size=self._max_time)
        self._gold_sequence = self._gold_sequence.unstack(tf.transpose(gold_sequence, [1, 0]))
def finalize_predictions(self, preds: FinalBeamSearchOptimizationDecoderOutput):
    # predicted_ids is [max_time, batch_size, beam_width] because that's how gather_tree produces it
    # transpose it to be [batch_size, beam_width, max_time], which is what we expect
    return tf.transpose(preds.predicted_ids, [1, 2, 0])
def __init__(self, grammar: AbstractGrammar, *args, training_output=None, grammar_helper: GrammarHelper = None, **kw):
    super().__init__(*args, **kw)
    self._grammar = grammar
    self._grammar_helper = grammar_helper if grammar_helper is not None else GrammarHelper(grammar)

    self._fixed_outputs = training_output
    if training_output is not None:
        self._fixed_outputs = tf.TensorArray(dtype=tf.int32, size=training_output.get_shape()[1])
        self._fixed_outputs = self._fixed_outputs.unstack(tf.transpose(training_output, [1, 0]))
def decov_loss(xs):
    """Decov loss as described in https://arxiv.org/pdf/1511.06068.pdf
    'Reducing Overfitting In Deep Networks by Decorrelating Representation'
    """
    x = tf.reshape(xs, [int(xs.get_shape()[0]), -1])
    m = tf.reduce_mean(x, 0, True)
    z = tf.expand_dims(x - m, 2)
    corr = tf.reduce_mean(tf.matmul(z, tf.transpose(z, perm=[0, 2, 1])), 0)
    corr_frob_sqr = tf.reduce_sum(tf.square(corr))
    corr_diag_sqr = tf.reduce_sum(tf.square(tf.diag_part(corr)))
    loss = 0.5 * (corr_frob_sqr - corr_diag_sqr)
    return loss
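A hedged usage sketch (the activation tensor is invented): the loss penalizes off-diagonal covariance between feature dimensions and is typically added to the task loss.

hidden = tf.zeros([32, 128])  # [batch, features]; the batch dimension must be static
reg = decov_loss(hidden)      # scalar, e.g. total_loss = task_loss + 0.1 * reg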
def gather_nd(params, indices, shape):
    rank = len(shape)
    flat_params = tf.reshape(params, [-1])
    multipliers = [reduce(lambda x, y: x * y, shape[i + 1:], 1) for i in range(0, rank)]
    indices_unpacked = tf.unstack(tf.transpose(indices, [rank - 1] + list(range(0, rank - 1))))
    flat_indices = sum([a * b for a, b in zip(multipliers, indices_unpacked)])
    return tf.gather(flat_params, flat_indices)

# ctc_label_dense_to_sparse is taken from https://github.com/tensorflow/tensorflow/issues/1742#issuecomment-205291527
#
# The CTC implementation in TensorFlow needs labels in a sparse representation,
# but sparse data and queues don't mix well, so we store padded tensors in the
# queue and convert to a sparse representation after dequeuing a batch.
#
def ctc_label_dense_to_sparse(labels, label_lengths, batch_size):
    # The second dimension of labels must be equal to the longest label length in the batch
    correct_shape_assert = tf.assert_equal(tf.shape(labels)[1], tf.reduce_max(label_lengths))
    with tf.control_dependencies([correct_shape_assert]):
        labels = tf.identity(labels)

    label_shape = tf.shape(labels)
    num_batches_tns = tf.stack([label_shape[0]])
    max_num_labels_tns = tf.stack([label_shape[1]])

    def range_less_than(previous_state, current_input):
        return tf.expand_dims(tf.range(label_shape[1]), 0) < current_input

    init = tf.cast(tf.fill(max_num_labels_tns, 0), tf.bool)
    init = tf.expand_dims(init, 0)
    dense_mask = tf.scan(range_less_than, label_lengths, initializer=init, parallel_iterations=1)
    dense_mask = dense_mask[:, 0, :]

    label_array = tf.reshape(tf.tile(tf.range(0, label_shape[1]), num_batches_tns), label_shape)
    label_ind = tf.boolean_mask(label_array, dense_mask)

    batch_array = tf.transpose(tf.reshape(tf.tile(tf.range(0, label_shape[0]), max_num_labels_tns),
                                          tf.reverse(label_shape, [0])))
    batch_ind = tf.boolean_mask(batch_array, dense_mask)

    indices = tf.transpose(tf.reshape(tf.concat([batch_ind, label_ind], 0), [2, -1]))
    shape = [batch_size, tf.reduce_max(label_lengths)]
    vals_sparse = gather_nd(labels, indices, shape)

    return tf.SparseTensor(tf.to_int64(indices), vals_sparse, tf.to_int64(label_shape))

# Validate and normalize transcriptions. Returns a cleaned version of the label
# or None if it's invalid.
def load_rbm_weights(self, path, layer_index):
    assert layer_index < len(self.layer_names)
    data_dict = {}
    data_dict[self.layer_names[layer_index][0]] = self.encoding_weights[layer_index]
    data_dict[self.layer_names[layer_index][1]] = self.encoding_biases[layer_index]
    saver = tf.train.Saver(data_dict)
    saver.restore(self.sess, path)

    # Now, we must also load decoding weights.
    self.sess.run(self.decoding_weights[layer_index].assign(tf.transpose(self.encoding_weights[layer_index])))