The following 50 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.reverse().
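Before the project examples, here is a minimal, self-contained sketch of what tf.reverse does: it flips a tensor along the given axes. In TF 1.x (which most of the examples below target) the axes are passed as a list of integer dimension indices; very old releases instead took a boolean mask with one entry per dimension, which is why both calling styles appear below. The data here is made up for illustration:

import tensorflow as tf

x = tf.constant([[1, 2, 3],
                 [4, 5, 6]])
rows_flipped = tf.reverse(x, [0])     # [[4, 5, 6], [1, 2, 3]]
cols_flipped = tf.reverse(x, [1])     # [[3, 2, 1], [6, 5, 4]]
both_flipped = tf.reverse(x, [0, 1])  # [[6, 5, 4], [3, 2, 1]]

with tf.Session() as sess:
    print(sess.run([rows_flipped, cols_flipped, both_flipped]))
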
def fix_variables(self, sess, pretrained_model):
    print('Fix VGG16 layers..')
    with tf.variable_scope('Fix_VGG16') as scope:
        with tf.device("/cpu:0"):
            # fix the vgg16 issue from conv weights to fc weights
            # fix RGB to BGR
            fc6_conv = tf.get_variable("fc6_conv", [7, 7, 512, 4096], trainable=False)
            fc7_conv = tf.get_variable("fc7_conv", [1, 1, 4096, 4096], trainable=False)
            conv1_rgb = tf.get_variable("conv1_rgb", [3, 3, 3, 64], trainable=False)
            restorer_fc = tf.train.Saver({self._scope + "/fc6/weights": fc6_conv,
                                          self._scope + "/fc7/weights": fc7_conv,
                                          self._scope + "/conv1/conv1_1/weights": conv1_rgb})
            restorer_fc.restore(sess, pretrained_model)

            sess.run(tf.assign(self._variables_to_fix[self._scope + '/fc6/weights:0'],
                               tf.reshape(fc6_conv,
                                          self._variables_to_fix[self._scope + '/fc6/weights:0'].get_shape())))
            sess.run(tf.assign(self._variables_to_fix[self._scope + '/fc7/weights:0'],
                               tf.reshape(fc7_conv,
                                          self._variables_to_fix[self._scope + '/fc7/weights:0'].get_shape())))
            sess.run(tf.assign(self._variables_to_fix[self._scope + '/conv1/conv1_1/weights:0'],
                               tf.reverse(conv1_rgb, [2])))

def __declare_variables(self):
    # Bias variables
    self.bias = self.__bias_variables([self.num_features], 'bias')
    self.cias = self.__bias_variables([self.depth], 'cias')

    # Visible (input) units
    with tf.name_scope('visible') as _:
        self.x = self._input if self._input is not None \
            else tf.placeholder(tf.float32, shape=self.input_shape, name='x')
        self.vis_0 = tf.div(self.x, 255, 'vis_0')

    # Weight variables
    with tf.name_scope('weights') as _:
        self.weights = self.__weight_variable(self.weight_shape, 'weights_forward')
        self.weights_flipped = tf.transpose(
            tf.reverse(self.weights, [True, True, False, False]),
            perm=[0, 1, 3, 2], name='weight_back')

    self.variables = [self.bias, self.cias, self.weights]

def _create_position_embedding(self, lengths, maxlen):
    # Slice to size of current sequence
    pe_slice = self.pos_embed[2:maxlen + 2, :]
    # Replicate encodings for each element in the batch
    batch_size = tf.shape(lengths)[0]
    pe_batch = tf.tile([pe_slice], [batch_size, 1, 1])

    # Mask out positions that are padded
    positions_mask = tf.sequence_mask(
        lengths=lengths, maxlen=maxlen, dtype=tf.float32)
    positions_embed = pe_batch * tf.expand_dims(positions_mask, 2)

    # [[1,2,3,4,PAD,PAD,PAD],[2,3,PAD,PAD,PAD,PAD,PAD]] with lengths [4,2]
    positions_embed = tf.reverse_sequence(positions_embed, lengths,
                                          batch_dim=0, seq_dim=1)
    # --> [[4,3,2,1,PAD,PAD,PAD],[3,2,PAD,PAD,PAD,PAD,PAD]]
    positions_embed = tf.reverse(positions_embed, [1])
    # --> [[PAD,PAD,PAD,1,2,3,4],[PAD,PAD,PAD,PAD,PAD,2,3]]

    return positions_embed

def bw_dynamic_rnn(cell, inputs, sequence_length=None, initial_state=None,
                   dtype=None, parallel_iterations=None, swap_memory=False,
                   time_major=False, scope=None):
    assert not time_major  # TODO : to be implemented later!

    flat_inputs = flatten(inputs, 2)  # [-1, J, d]
    flat_len = None if sequence_length is None else tf.cast(flatten(sequence_length, 0), 'int64')

    flat_inputs = tf.reverse(flat_inputs, 1) if sequence_length is None \
        else tf.reverse_sequence(flat_inputs, sequence_length, 1)
    flat_outputs, final_state = _dynamic_rnn(
        cell, flat_inputs, sequence_length=flat_len,
        initial_state=initial_state, dtype=dtype,
        parallel_iterations=parallel_iterations, swap_memory=swap_memory,
        time_major=time_major, scope=scope)
    flat_outputs = tf.reverse(flat_outputs, 1) if sequence_length is None \
        else tf.reverse_sequence(flat_outputs, sequence_length, 1)

    outputs = reconstruct(flat_outputs, inputs, 2)
    return outputs, final_state

def _compute_rnn_outputs(self):
    reversed_inputs = tf.reverse(self.inputs, [False, True, False])
    reversed_resets = tf.reverse(self.resets, [False, True, False])
    with tf.variable_scope('fw'):
        self._fw_lstm = LSTM(self.inputs, self.resets, self.training,
                             self.num_layers, self.hidden_layer_size,
                             self.init_scale, self.dropout_keep_prob)
    with tf.variable_scope('rv'):
        self._rv_lstm = LSTM(reversed_inputs, reversed_resets, self.training,
                             self.num_layers, self.hidden_layer_size,
                             self.init_scale, self.dropout_keep_prob)

    fw_outputs = self._fw_lstm.outputs
    rv_outputs = tf.reverse(self._rv_lstm.outputs, [False, True, False])
    outputs = tf.concat(2, [fw_outputs, rv_outputs])
    return outputs

def reverse(x, axes):
    """Reverse a tensor along the specified axes.

    # Returns
        A tensor.
    """
    if isinstance(axes, int):
        axes = [axes]
    try:
        return tf.reverse_v2(x, axes)
    except AttributeError:
        # Older TF versions expect a boolean mask with one entry per dimension.
        dims = [True if i in axes else False
                for i in range(len(x.get_shape()._dims))]
        return tf.reverse(x, dims)

# VALUE MANIPULATION

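As a usage sketch for the compatibility helper above (the placeholder shape is an assumption, not from the original project), flipping an image batch of shape (batch, height, width, channels) horizontally would reverse axis 2:

images = tf.placeholder(tf.float32, [None, 224, 224, 3])
flipped = reverse(images, 2)  # flip along the width axis, on old and new TF alike
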
def pos_prediction(self):
    outputs, size, batch_size = self.outputs
    num_class = len(POS_tagging['P'])
    output_w = weight_variable([size, num_class])
    output_b = bias_variable([num_class])
    # outputs = tf.transpose(outputs, [1, 0, 2])
    tag_trans = weight_variable([num_class, num_class])
    outputs = tf.reverse(outputs, [True, False, False])

    def transition(previous_pred, x):
        res = tf.matmul(x, output_w) + output_b
        deviation = tf.tile(tf.expand_dims(tf.reduce_min(previous_pred, reduction_indices=1), 1),
                            [1, num_class])
        previous_pred -= deviation
        focus = 0.5
        res += tf.matmul(previous_pred, tag_trans) * focus
        prediction = tf.nn.softmax(res)
        return prediction

    # Recurrent network.
    pred = tf.scan(transition, outputs,
                   initializer=tf.zeros([batch_size, num_class]),
                   parallel_iterations=100)
    pred = tf.reverse(pred, [True, False, False])
    pred = tf.transpose(pred, [1, 0, 2])
    return pred

def __init__(self, namespace, input_state, opts):
    super(ValueNetwork, self).__init__(namespace)

    with tf.variable_scope(namespace):
        # do potential horizontal flipping of input state
        # recall input is (batch, height, width, rgb) and we want to flip on width
        flipped_input_state = tf.cond(base_network.FLIP_HORIZONTALLY,
                                      lambda: tf.reverse(input_state, dims=[False, False, True, False]),
                                      lambda: input_state)

        # expose self.input_state_representation since it will be the network "shared"
        # by l_value & output_action network when running --share-input-state-representation
        self.conv_net_output = self.conv_net_on(flipped_input_state, opts)
        self.hidden_layers = self.hidden_layers_on(self.conv_net_output, [100, 50])
        self.value = slim.fully_connected(scope='fc',
                                          inputs=self.hidden_layers,
                                          num_outputs=1,
                                          weights_regularizer=tf.contrib.layers.l2_regularizer(0.01),
                                          activation_fn=None)  # (batch, 1)

def ctc_label_dense_to_sparse(self, labels, label_lengths):
    """Mike Henry's implementation, with some minor modifications."""
    with self.G.as_default():
        label_shape = tf.shape(labels)
        num_batches_tns = tf.stack([label_shape[0]])
        max_num_labels_tns = tf.stack([label_shape[1]])

        def range_less_than(previous_state, current_input):
            return tf.expand_dims(tf.range(label_shape[1]), 0) < current_input

        init = tf.cast(tf.fill(max_num_labels_tns, 0), tf.bool)
        init = tf.expand_dims(init, 0)
        dense_mask = functional_ops.scan(range_less_than, label_lengths,
                                         initializer=init, parallel_iterations=1)
        dense_mask = dense_mask[:, 0, :]

        label_array = tf.reshape(tf.tile(tf.range(0, label_shape[1]), num_batches_tns),
                                 label_shape)
        label_ind = tf.boolean_mask(label_array, dense_mask)

        batch_array = tf.transpose(tf.reshape(
            tf.tile(tf.range(0, label_shape[0]), max_num_labels_tns),
            tf.reverse(label_shape, [0])))
        batch_ind = tf.boolean_mask(batch_array, dense_mask)

        indices = tf.transpose(tf.reshape(tf.concat(axis=0, values=[batch_ind, label_ind]), [2, -1]))
        vals_sparse = tf.gather_nd(labels, indices)

        return tf.SparseTensor(tf.to_int64(indices), vals_sparse, tf.to_int64(label_shape))

def reverse(x, axes):
    """Reverse a tensor along the specified axes.

    # Arguments
        x: Tensor to reverse.
        axes: Integer or iterable of integers.
            Axes to reverse.

    # Returns
        A tensor.
    """
    if isinstance(axes, int):
        axes = [axes]
    return tf.reverse(x, axes)

# VALUE MANIPULATION

def get_decoder_states(self):
    batch_size = tf.shape(self.input)[0]
    seq_length = tf.shape(self.input)[1]
    scan_input_ = tf.transpose(self.input, perm=[2, 0, 1])
    scan_input_ = tf.transpose(scan_input_)  # scan input is [seq_length x batch_size x input_dim]
    z = tf.zeros([1, batch_size, self.input_dim], dtype=tf.float32)
    scan_input = tf.concat([scan_input_, z], 0)
    scan_input = tf.slice(scan_input, [1, 0, 0], [seq_length, batch_size, self.input_dim])
    scan_input = tf.reverse(scan_input, [0])  # tf.reverse(scan_input, [True, False, False])
    scan_time_ = tf.transpose(self.time)  # scan_time [seq_length x batch_size]
    z2 = tf.zeros([1, batch_size], dtype=tf.float32)
    scan_time = tf.concat([scan_time_, z2], 0)
    scan_time = tf.slice(scan_time, [1, 0], [seq_length, batch_size])
    scan_time = tf.reverse(scan_time, [0])  # tf.reverse(scan_time, [True, False])
    initial_hidden, initial_cell = self.get_representation()
    ini_state_cell = tf.stack([initial_hidden, initial_cell])
    # make scan_time [seq_length x batch_size x 1]
    scan_time = tf.reshape(scan_time, [tf.shape(scan_time)[0], tf.shape(scan_time)[1], 1])
    concat_input = tf.concat([scan_time, scan_input], 2)  # [seq_length x batch_size x input_dim+1]
    packed_hidden_states = tf.scan(self.T_LSTM_Decoder_Unit, concat_input,
                                   initializer=ini_state_cell, name='decoder_states')
    all_decoder_states = packed_hidden_states[:, 0, :, :]
    return all_decoder_states

def lstm_seq2seq_internal(inputs, targets, hparams, train):
    """The basic LSTM seq2seq model, main step used for training."""
    with tf.variable_scope("lstm_seq2seq"):
        if inputs is not None:
            # Flatten inputs.
            inputs = common_layers.flatten4d3d(inputs)
            # LSTM encoder.
            _, final_encoder_state = lstm(
                tf.reverse(inputs, axis=[1]), hparams, train, "encoder")
        else:
            final_encoder_state = None
        # LSTM decoder.
        shifted_targets = common_layers.shift_right(targets)
        decoder_outputs, _ = lstm(
            common_layers.flatten4d3d(shifted_targets),
            hparams,
            train,
            "decoder",
            initial_state=final_encoder_state)
        return tf.expand_dims(decoder_outputs, axis=2)

def positive_conv(a, b):
    """Pairwise convolution on the positive domain of batches of 1-d vectors.

    Args:
      a: discrete function on the positive domain (e.g. real-valued vector
        with a[0] = f(0), etc). Shape of [batch_size, domain_size].
      b: same as a.

    Returns:
      Discrete function on positive domain representing convolution of a and b.
    """
    batch_size = a.get_shape().dims[0].value
    width = a.get_shape().dims[1].value

    a = tf.pad(a, [[0, 0], [width, 0]])
    a = tf.transpose(a)
    b = tf.pad(b, [[0, 0], [width, 0]])
    b = tf.reverse(b, [False, True])
    b = tf.transpose(b)

    reshaped_a = tf.reshape(a, [1, 1, width * 2, batch_size])
    reshaped_b = tf.reshape(b, [1, width * 2, batch_size, 1])

    res = tf.nn.depthwise_conv2d(
        reshaped_a, reshaped_b, strides=[1, 1, 1, 1], padding="SAME")
    res = tf.reshape(tf.transpose(res), [batch_size, width * 2])
    res = tf.slice(res, [0, width], [batch_size, width])
    return res

def __call__(self, inputs, init_state=None):
    if init_state is None:
        init_state = self.zero_state
    init_states = tf.unstack(init_state)
    next_inputs = inputs
    for i, cell in enumerate(self.cells):
        with tf.variable_scope('bilstm_%d' % i):
            with tf.variable_scope('forward'):
                f_outputs = cell.scan(next_inputs, init_states[i])
            with tf.variable_scope('backward'):
                r_inputs = tf.reverse(next_inputs, axis=(0,))
                rb_outputs = cell.scan(r_inputs, init_states[i])
                b_outputs = tf.reverse(rb_outputs, axis=(0,))
            outputs = tf.concat([f_outputs, b_outputs], axis=2)
            next_inputs = tf.nn.dropout(outputs, keep_prob=self.dropout)
    return next_inputs

def image_mirroring(img, label, seed):
    """
    Randomly mirrors the images.

    Args:
      img: Training image to mirror.
      label: Segmentation mask to mirror.
      seed: Random seed.
    """
    distort_left_right_random = tf.random_uniform([1], 0, 1.0, dtype=tf.float32, seed=seed)[0]
    mirror = tf.less(tf.stack([1.0, distort_left_right_random, 1.0]), 0.5)
    mirror = tf.boolean_mask([0, 1, 2], mirror)
    img = tf.reverse(img, mirror)
    label = tf.reverse(label, mirror)
    return img, label

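The trick above is worth spelling out: only the width slot of the stacked vector holds the random draw, so tf.less yields [False, r < 0.5, False], and tf.boolean_mask([0, 1, 2], ...) collapses that to either an empty axis list or [1]. tf.reverse with an empty list is a no-op, so the image and its mask are flipped together along the width axis with probability 0.5. A usage sketch with assumed placeholder shapes (not from the original project):

img = tf.placeholder(tf.float32, [321, 321, 3])  # H x W x C
label = tf.placeholder(tf.uint8, [321, 321, 1])
img_aug, label_aug = image_mirroring(img, label, seed=1234)
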
def ctc_label_dense_to_sparse(labels, label_lengths, batch_size):
    # The second dimension of labels must be equal to the longest label length in the batch
    correct_shape_assert = tf.assert_equal(tf.shape(labels)[1], tf.reduce_max(label_lengths))
    with tf.control_dependencies([correct_shape_assert]):
        labels = tf.identity(labels)

    label_shape = tf.shape(labels)
    num_batches_tns = tf.stack([label_shape[0]])
    max_num_labels_tns = tf.stack([label_shape[1]])

    def range_less_than(previous_state, current_input):
        return tf.expand_dims(tf.range(label_shape[1]), 0) < current_input

    init = tf.cast(tf.fill(max_num_labels_tns, 0), tf.bool)
    init = tf.expand_dims(init, 0)
    dense_mask = tf.scan(range_less_than, label_lengths, initializer=init, parallel_iterations=1)
    dense_mask = dense_mask[:, 0, :]

    label_array = tf.reshape(tf.tile(tf.range(0, label_shape[1]), num_batches_tns), label_shape)
    label_ind = tf.boolean_mask(label_array, dense_mask)

    batch_array = tf.transpose(tf.reshape(tf.tile(tf.range(0, label_shape[0]), max_num_labels_tns),
                                          tf.reverse(label_shape, [0])))
    batch_ind = tf.boolean_mask(batch_array, dense_mask)

    indices = tf.transpose(tf.reshape(tf.concat([batch_ind, label_ind], 0), [2, -1]))
    shape = [batch_size, tf.reduce_max(label_lengths)]
    vals_sparse = gather_nd(labels, indices, shape)

    return tf.SparseTensor(tf.to_int64(indices), vals_sparse, tf.to_int64(label_shape))

# Validate and normalize transcriptions. Returns a cleaned version of the label
# or None if it's invalid.

def fix_variables(self, sess, pretrained_model):
    print('Fix Resnet V1 layers..')
    with tf.variable_scope('Fix_Resnet_V1') as scope:
        with tf.device("/cpu:0"):
            # fix RGB to BGR
            conv1_rgb = tf.get_variable("conv1_rgb", [7, 7, 3, 64], trainable=False)
            restorer_fc = tf.train.Saver({self._scope + "/conv1/weights": conv1_rgb})
            restorer_fc.restore(sess, pretrained_model)

            sess.run(tf.assign(self._variables_to_fix[self._scope + '/conv1/weights:0'],
                               tf.reverse(conv1_rgb, [2])))

def _add_gt_image(self):
    # add back mean
    image = self._image + cfg.PIXEL_MEANS
    # BGR to RGB (opencv uses BGR)
    resized = tf.image.resize_bilinear(image, tf.to_int32(self._im_info[:2] / self._im_info[2]))
    self._gt_image = tf.reverse(resized, axis=[-1])

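The last line above is the channel-swap idiom that recurs in several examples here: reversing the last axis of any (..., 3) tensor converts BGR to RGB and back, because the channel order is simply flipped. A minimal sketch with an assumed uint8 image tensor:

rgb = tf.placeholder(tf.uint8, [None, None, 3])
bgr = tf.reverse(rgb, axis=[-1])  # swap the R and B channels
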
def fix_variables(self, sess, pretrained_model):
    print('Fix MobileNet V1 layers..')
    with tf.variable_scope('Fix_MobileNet_V1') as scope:
        with tf.device("/cpu:0"):
            # fix RGB to BGR, and match the scale by (255.0 / 2.0)
            Conv2d_0_rgb = tf.get_variable("Conv2d_0_rgb",
                                           [3, 3, 3, max(int(32 * self._depth_multiplier), 8)],
                                           trainable=False)
            restorer_fc = tf.train.Saver({self._scope + "/Conv2d_0/weights": Conv2d_0_rgb})
            restorer_fc.restore(sess, pretrained_model)

            sess.run(tf.assign(self._variables_to_fix[self._scope + "/Conv2d_0/weights:0"],
                               tf.reverse(Conv2d_0_rgb / (255.0 / 2.0), [2])))

def pixel_wise_softmax(output_map):
    exponential_map = tf.exp(output_map)
    evidence = tf.add(exponential_map,
                      tf.reverse(exponential_map, [False, False, False, True]))
    return tf.div(exponential_map, evidence, name="pixel_wise_softmax")

def random_flip_left_right(image, seed=None):
    uniform_random = random_ops.random_uniform([], 0, 1.0, seed=seed)
    mirror = math_ops.less(tf.pack([1.0, 1.0, uniform_random, 1.0]), 0.5)
    return tf.reverse(image, mirror)

def random_flip_up_down(image, seed=None):
    uniform_random = random_ops.random_uniform([], 0, 1.0, seed=seed)
    mirror = math_ops.less(tf.pack([1.0, uniform_random, 1.0, 1.0]), 0.5)
    return tf.reverse(image, mirror)

def conv1DWavelet(wav, waveletWidth, waveletEquation):
    kernelSamples = waveletWidth * 10
    kernel = waveletEquation(waveletWidth, kernelSamples)
    kernel = tf.reverse(kernel, [0])
    kernel = tf.reshape(kernel, tf.stack([kernelSamples, 1, 1, 1]))
    conv = tf.nn.conv2d(wav, kernel, [1, 1, 1, 1], padding='SAME')
    conv = tf.squeeze(tf.squeeze(conv))
    return conv

def load_image(self, image_path, is_jpeg):
    # Read the file
    file_data = tf.read_file(image_path)
    # Decode the image data
    img = tf.cond(
        is_jpeg,
        lambda: tf.image.decode_jpeg(file_data, channels=self.data_spec.channels),
        lambda: tf.image.decode_png(file_data, channels=self.data_spec.channels))
    if self.data_spec.expects_bgr:
        # Convert from RGB channel ordering to BGR
        # This matches, for instance, how OpenCV orders the channels.
        img = tf.reverse(img, [False, False, True])
    return img

def process(self):
    idx, image_path = self.path_queue.dequeue()
    img = tf.image.decode_jpeg(tf.read_file(image_path), channels=3)  # It is an RGB PNG
    img = tf.reverse(img, [False, False, True])  # RGB -> BGR
    return (idx, ImageReader.process_single_image(img,
                                                  self.image_spec['scale_size'],
                                                  self.image_spec['crop_size'],
                                                  self.image_spec['mean']))

def reverse(x, axes):
    '''Reverse a tensor along the specified axes
    '''
    if type(axes) == int:
        axes = [axes]
    dims = [True if i in axes else False
            for i in range(len(x.get_shape()._dims))]
    return tf.reverse(x, dims)

# VALUE MANIPULATION

def ctc_label_dense_to_sparse(labels, label_lengths):
    # undocumented feature soon to be made public
    from tensorflow.python.ops import functional_ops
    label_shape = tf.shape(labels)
    num_batches_tns = tf.pack([label_shape[0]])
    max_num_labels_tns = tf.pack([label_shape[1]])

    def range_less_than(previous_state, current_input):
        return tf.expand_dims(tf.range(label_shape[1]), 0) < tf.fill(max_num_labels_tns, current_input)

    init = tf.cast(tf.fill([1, label_shape[1]], 0), tf.bool)
    dense_mask = functional_ops.scan(range_less_than, label_lengths,
                                     initializer=init, parallel_iterations=1)
    dense_mask = dense_mask[:, 0, :]

    label_array = tf.reshape(tf.tile(tf.range(0, label_shape[1]), num_batches_tns), label_shape)
    label_ind = tf.boolean_mask(label_array, dense_mask)

    batch_array = tf.transpose(tf.reshape(tf.tile(tf.range(0, label_shape[0]), max_num_labels_tns),
                                          tf.reverse(label_shape, [True])))
    batch_ind = tf.boolean_mask(batch_array, dense_mask)

    indices = tf.transpose(tf.reshape(tf.concat(0, [batch_ind, label_ind]), [2, -1]))
    vals_sparse = tf.gather_nd(labels, indices)

    return tf.SparseTensor(tf.to_int64(indices), vals_sparse, tf.to_int64(label_shape))

def get_output_for(self, input, **kwargs):
    axis = self.axis
    ndims = input.get_shape().ndims
    if axis < 0:
        axis += ndims
    if isinstance(self.slice, int) and self.slice < 0:
        return tf.reverse(input, [False] * self.axis + [True] + [False] * (ndims - axis - 1))[
            (slice(None),) * axis + (-1 - self.slice,) + (slice(None),) * (ndims - axis - 1)
        ]
    return input[(slice(None),) * axis + (self.slice,) + (slice(None),) * (ndims - axis - 1)]

def __gradient_ascent(self):
    # Gradient ascent
    with tf.name_scope('gradient') as _:
        self.grad_bias = tf.mul(tf.reduce_mean(self.hid_prob0 - self.hid_prob1, [0, 1, 2]),
                                self.learning_rate * self.batch_size, name='grad_bias')
        self.grad_cias = tf.mul(tf.reduce_mean(self.vis_0 - self.vis_1, [0, 1, 2]),
                                self.learning_rate * self.batch_size, name='grad_cias')

        # TODO: Is there any method to calculate batch-elementwise convolution?
        temp_grad_weights = tf.zeros(self.weight_shape)
        hid_filter0 = tf.reverse(self.hid_prob0, [False, True, True, False])
        hid_filter1 = tf.reverse(self.hid_prob1, [False, True, True, False])
        for idx in range(0, self.batch_size):
            hid0_ith = self.__get_ith_hid_4d(hid_filter0, idx)
            hid1_ith = self.__get_ith_hid_4d(hid_filter1, idx)

            positive = [0] * self.depth
            negative = [0] * self.depth
            one_ch_conv_shape = [self.width, self.height, 1, self.num_features]
            for jdx in range(0, self.depth):
                positive[jdx] = tf.reshape(self.__conv2d(self.__get_ij_vis_4d(self.vis_0, idx, jdx), hid0_ith),
                                           one_ch_conv_shape)
                negative[jdx] = tf.reshape(self.__conv2d(self.__get_ij_vis_4d(self.vis_1, idx, jdx), hid1_ith),
                                           one_ch_conv_shape)
            positive = tf.concat(2, positive)
            negative = tf.concat(2, negative)
            temp_grad_weights = tf.add(temp_grad_weights,
                                       tf.slice(tf.sub(positive, negative), [0, 0, 0, 0], self.weight_shape))
        self.grad_weights = tf.mul(temp_grad_weights, self.learning_rate / (self.width * self.height))

    self.gradient_ascent = [self.weights.assign_add(self.grad_weights),
                            self.bias.assign_add(self.grad_bias),
                            self.cias.assign_add(self.grad_cias)]

def default_params():
    params = Seq2SeqModel.default_params().copy()
    params.update({
        "encoder.class": "seq2seq.encoders.ConvEncoderFairseq",
        "encoder.params": {},  # Arbitrary parameters for the encoder
        "decoder.class": "seq2seq.decoders.ConvDecoder",
        "decoder.params": {},  # Arbitrary parameters for the decoder
        "source.max_seq_len": 50,
        "source.reverse": False,
        "target.max_seq_len": 50,
        "embedding.dim": 256,
        "embedding.init_scale": 0.04,
        "embedding.share": False,
        "position_embeddings.num_positions": 100,
        "inference.beam_search.beam_width": 0,
        "inference.beam_search.length_penalty_weight": 1.0,
        "inference.beam_search.choose_successors_fn": "choose_top_k",
        "vocab_source": "",
        "vocab_target": "",
        "optimizer.name": "Momentum",
        "optimizer.learning_rate": 0.25,
        "optimizer.params": {"momentum": 0.99, "use_nesterov": True},  # Arbitrary parameters for the optimizer
        # "optimizer.params": {"epsilon": 0.0000008},  # Arbitrary parameters for the optimizer
        "optimizer.lr_decay_type": "exponential_decay",
        "optimizer.lr_decay_steps": 5000,  # one epoch steps
        "optimizer.lr_decay_rate": 0.9,
        "optimizer.lr_start_decay_at": 0,  # start annealing at epoch 0
        "optimizer.lr_stop_decay_at": tf.int32.max,
        "optimizer.lr_min_learning_rate": 1e-5,
        "optimizer.lr_staircase": True,
        "optimizer.clip_gradients": 0.1,
        "optimizer.clip_embed_gradients": 5,
        "optimizer.sync_replicas": 0,
        "optimizer.sync_replicas_to_aggregate": 0,
    })
    return params

def encode(self, features, labels):
    # [[1,2,3,4,PAD,PAD,PAD],[2,3,PAD,PAD,PAD,PAD,PAD]] with lengths [4,2]
    features["source_ids"] = tf.reverse_sequence(features["source_ids"], features["source_len"],
                                                 batch_dim=0, seq_dim=1)
    # --> [[4,3,2,1,PAD,PAD,PAD],[3,2,PAD,PAD,PAD,PAD,PAD]]
    features["source_ids"] = tf.reverse(features["source_ids"], [1])
    # --> [[PAD,PAD,PAD,1,2,3,4],[PAD,PAD,PAD,PAD,PAD,2,3]]

    source_embedded = tf.nn.embedding_lookup(self.source_embedding_fairseq(),
                                             features["source_ids"])
    encoder_fn = self.encoder_class(self.params["encoder.params"], self.mode,
                                    self.source_pos_embedding_fairseq())
    return encoder_fn(source_embedded, features["source_len"])

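Pairing tf.reverse_sequence with tf.reverse, as in the two examples above, is a common trick for turning right-padded batches into left-padded ones: the first call reverses only the valid prefix of each row, and the full-axis reverse then restores the token order while pushing the padding to the front. A small self-contained sketch (made-up ids, PAD = 0):

ids = tf.constant([[1, 2, 3, 4, 0, 0, 0],
                   [2, 3, 0, 0, 0, 0, 0]])
lens = tf.constant([4, 2], dtype=tf.int64)
shifted = tf.reverse(tf.reverse_sequence(ids, lens, batch_dim=0, seq_dim=1), [1])
# -> [[0, 0, 0, 1, 2, 3, 4],
#     [0, 0, 0, 0, 0, 2, 3]]
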
def get_output_for(self, input, **kwargs):
    axis = self.axis
    ndims = input.get_shape().ndims
    if axis < 0:
        axis += ndims
    if isinstance(self.slice, int) and self.slice < 0:
        return tf.reverse(input, [self.axis + 1])[
            (slice(None),) * axis + (-1 - self.slice,) + (slice(None),) * (ndims - axis - 1)
        ]
    return input[(slice(None),) * axis + (self.slice,) + (slice(None),) * (ndims - axis - 1)]

def build_graph(self):
    # Build graph
    state = graph.Placeholder(np.float32, shape=(2,))
    reverse = graph.TfNode(tf.reverse(state.node, [0]))

    # Expose public API
    self.op_get_action = self.Op(reverse, state=state)

def cummax(x, reverse=False, name=None):
    """Compute the cumulative maximum of the tensor `x` along axis 0. This
    operation is similar to the more classic `cumsum`. Only supports 1-D
    Tensors for now.

    Args:
      x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
        `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
        `complex128`, `qint8`, `quint8`, `qint32`, `half`.
      reverse: A `bool` (default: False).
      name: A name for the operation (optional).

    Returns:
      A `Tensor`. Has the same type as `x`.
    """
    with ops.name_scope(name, "Cummax", [x]) as name:
        x = ops.convert_to_tensor(x, name="x")
        # Not very optimal: should directly integrate reverse into tf.scan.
        if reverse:
            x = tf.reverse(x, axis=[0])
        # 'Accumulating' maximum: ensure it is always increasing.
        cmax = tf.scan(lambda a, y: tf.maximum(a, y), x,
                       initializer=None, parallel_iterations=1,
                       back_prop=False, swap_memory=False)
        if reverse:
            cmax = tf.reverse(cmax, axis=[0])
        return cmax

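A quick usage sketch for the helper above, under the same TF 1.x assumptions as the snippet (the input values are made up): with reverse=False it yields a running prefix maximum, and with reverse=True a running suffix maximum, because the tensor is flipped before and after the scan.

scores = tf.constant([0.3, 0.9, 0.5, 0.7])
running_max = cummax(scores)               # [0.3, 0.9, 0.9, 0.9]
suffix_max = cummax(scores, reverse=True)  # [0.9, 0.9, 0.7, 0.7]
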
def main(self):
    train_graph = tf.Graph()
    save_path = self.path + '/checkpoints/dev'
    source_path = self.path + '/data/small_vocab_en'
    target_path = self.path + '/data/small_vocab_fr'
    PreProcess(source_path, target_path).process_and_save_data()
    _, batch_size, rnn_size, num_layers, encoding_embedding_size, decoding_embedding_size, _, _ = \
        Params().get()
    (source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = \
        self.load_process()
    max_source_sentence_length = max([len(sentence) for sentence in source_int_text])

    with train_graph.as_default():
        input_data, targets, lr, keep_prob = Inputs().get()
        sequence_length = tf.placeholder_with_default(
            max_source_sentence_length, None, name='sequence_length')
        input_shape = tf.shape(input_data)
        train_logits, inference_logits = Seq2seq().seq2seq_model(
            tf.reverse(input_data, [-1]), targets, keep_prob, batch_size,
            sequence_length, len(source_vocab_to_int), len(target_vocab_to_int),
            encoding_embedding_size, decoding_embedding_size, rnn_size,
            num_layers, target_vocab_to_int)
        tf.identity(inference_logits, 'logits')
        with tf.name_scope("optimization"):
            cost = tf.contrib.seq2seq.sequence_loss(
                train_logits, targets, tf.ones([input_shape[0], sequence_length]))
            optimizer = tf.train.AdamOptimizer(lr)
            gradients = optimizer.compute_gradients(cost)
            capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var)
                                for grad, var in gradients if grad is not None]
            train_op = optimizer.apply_gradients(capped_gradients)

    Train(source_int_text, target_int_text, train_graph, train_op, cost,
          input_data, targets, lr, sequence_length, keep_prob,
          inference_logits, save_path).train()

def __init__(self, *args):
    """Create a reverse LSTM model.

    Args:
        See `LSTMModel`.
    """
    super(ReverseLSTMModel, self).__init__(*args)

def _compute_rnn_outputs(self):
    reversed_inputs = tf.reverse(self.inputs, [False, True, False])
    reversed_resets = tf.reverse(self.resets, [False, True, False])
    self._rv_lstm = LSTM(reversed_inputs, reversed_resets, self.training,
                         self.num_layers, self.hidden_layer_size,
                         self.init_scale, self.dropout_keep_prob)
    outputs = tf.reverse(self._rv_lstm.outputs, [False, True, False])
    return outputs

def ctc_label_dense_to_sparse(labels, label_lengths):
    # undocumented feature soon to be made public
    from tensorflow.python.ops import functional_ops
    label_shape = tf.shape(labels)
    num_batches_tns = stack([label_shape[0]])
    max_num_labels_tns = stack([label_shape[1]])

    def range_less_than(previous_state, current_input):
        return tf.expand_dims(tf.range(label_shape[1]), 0) < tf.fill(max_num_labels_tns, current_input)

    init = tf.cast(tf.fill([1, label_shape[1]], 0), tf.bool)
    dense_mask = functional_ops.scan(range_less_than, label_lengths,
                                     initializer=init, parallel_iterations=1)
    dense_mask = dense_mask[:, 0, :]

    label_array = tf.reshape(tf.tile(tf.range(0, label_shape[1]), num_batches_tns), label_shape)
    label_ind = tf.boolean_mask(label_array, dense_mask)

    batch_array = tf.transpose(tf.reshape(tf.tile(tf.range(0, label_shape[0]), max_num_labels_tns),
                                          reverse(label_shape, 0)))
    batch_ind = tf.boolean_mask(batch_array, dense_mask)

    indices = tf.transpose(tf.reshape(concatenate([batch_ind, label_ind], axis=0), [2, -1]))
    vals_sparse = tf.gather_nd(labels, indices)

    return tf.SparseTensor(tf.to_int64(indices), vals_sparse, tf.to_int64(label_shape))

def discriminator_cramer_test(self, opts, input_):
    """Deterministic discriminator using the Cramer-von Mises test."""
    add_dim = opts['z_test_proj_dim']
    if add_dim > 0:
        dim = int(input_.get_shape()[1])
        proj = np.random.rand(dim, add_dim)
        proj = proj - np.mean(proj, 0)
        norms = np.sqrt(np.sum(np.square(proj), 0) + 1e-5)
        proj = tf.constant(proj / norms, dtype=tf.float32)
        projected_x = tf.matmul(input_, proj)  # Shape [batch_size, add_dim].
        # Shape [batch_size, z_dim+add_dim]
        all_dims_x = tf.concat([input_, projected_x], 1)
    else:
        all_dims_x = input_

    # top_k can only sort on the last dimension and we want to sort the
    # first one (batch_size).
    batch_size = self.get_batch_size(opts, all_dims_x)
    transposed = tf.transpose(all_dims_x, perm=[1, 0])
    values, indices = tf.nn.top_k(transposed, k=tf.cast(batch_size, tf.int32))
    values = tf.reverse(values, [1])
    # values = tf.Print(values, [values], "sorted values")
    normal_dist = tf.contrib.distributions.Normal(0., float(opts['pot_pz_std']))
    normal_cdf = normal_dist.cdf(values)
    # normal_cdf = tf.Print(normal_cdf, [normal_cdf], "normal_cdf")
    expected = (2 * tf.range(1, batch_size + 1, 1, dtype="float") - 1) / (2.0 * batch_size)
    # expected = tf.Print(expected, [expected], "expected")
    # We don't use the constant.
    # constant = 1.0 / (12.0 * batch_size * batch_size)
    # stat = constant + tf.reduce_sum(tf.square(expected - normal_cdf), 1) / batch_size
    stat = tf.reduce_sum(tf.square(expected - normal_cdf), 1) / batch_size
    stat = tf.reduce_mean(stat)
    # stat = tf.Print(stat, [stat], "stat")
    return stat

def discriminator_anderson_test(self, opts, input_):
    """Deterministic discriminator using the Anderson-Darling test."""
    # A-D test says to normalize data before computing the statistic.
    # Because true mean and variance are known, we are supposed to use
    # the population parameters for that, but wiki says it's better to
    # still use the sample estimates while normalizing.
    means = tf.reduce_mean(input_, 0)
    input_ = input_ - means  # Broadcasting
    stds = tf.sqrt(1e-5 + tf.reduce_mean(tf.square(input_), 0))
    input_ = input_ / stds
    # top_k can only sort on the last dimension and we want to sort the
    # first one (batch_size).
    batch_size = self.get_batch_size(opts, input_)
    transposed = tf.transpose(input_, perm=[1, 0])
    values, indices = tf.nn.top_k(transposed, k=tf.cast(batch_size, tf.int32))
    values = tf.reverse(values, [1])
    normal_dist = tf.contrib.distributions.Normal(0., float(opts['pot_pz_std']))
    normal_cdf = normal_dist.cdf(values)
    # ln_normal_cdf is of shape (z_dim, batch_size)
    ln_normal_cdf = tf.log(normal_cdf)
    ln_one_normal_cdf = tf.log(1.0 - normal_cdf)
    w1 = 2 * tf.range(1, batch_size + 1, 1, dtype="float") - 1
    w2 = 2 * tf.range(batch_size - 1, -1, -1, dtype="float") + 1
    stat = -batch_size - tf.reduce_sum(
        w1 * ln_normal_cdf + w2 * ln_one_normal_cdf, 1) / batch_size
    # stat is of shape (z_dim,)
    stat = tf.reduce_mean(tf.square(stat))
    return stat