The following 50 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.tile().
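Before the project examples, a minimal sketch (with made-up values) of the basic semantics: tf.tile() repeats a tensor multiples[i] times along dimension i, so the output shape is the input shape multiplied element-wise by multiples. This uses the TF 1.x session API, matching the examples below.

import tensorflow as tf

t = tf.constant([[1, 2], [3, 4]])     # shape [2, 2]
tiled = tf.tile(t, multiples=[2, 3])  # shape [2*2, 2*3] = [4, 6]

with tf.Session() as sess:
    print(sess.run(tiled))
    # [[1 2 1 2 1 2]
    #  [3 4 3 4 3 4]
    #  [1 2 1 2 1 2]
    #  [3 4 3 4 3 4]]
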
def SampleRandomFrames(model_input, num_frames, num_samples):
  """Samples a random set of frames of size num_samples.

  Args:
    model_input: A tensor of size batch_size x max_frames x feature_size
    num_frames: A tensor of size batch_size x 1
    num_samples: A scalar

  Returns:
    `model_input`: A tensor of size batch_size x num_samples x feature_size
  """
  batch_size = tf.shape(model_input)[0]
  frame_index = tf.cast(
      tf.multiply(
          tf.random_uniform([batch_size, num_samples]),
          tf.tile(tf.cast(num_frames, tf.float32), [1, num_samples])),
      tf.int32)
  batch_index = tf.tile(
      tf.expand_dims(tf.range(batch_size), 1), [1, num_samples])
  index = tf.stack([batch_index, frame_index], 2)
  return tf.gather_nd(model_input, index)

def calculate_loss(self, predictions, support_predictions, labels, **unused_params):
  """
  support_predictions: batch_size x num_models x num_classes
  predictions = tf.reduce_mean(support_predictions, axis=1)
  """
  model_count = tf.shape(support_predictions)[1]
  vocab_size = tf.shape(support_predictions)[2]

  mean_predictions = tf.reduce_mean(support_predictions, axis=1, keep_dims=True)
  support_labels = tf.tile(tf.expand_dims(tf.cast(labels, dtype=tf.float32), axis=1),
                           multiples=[1, model_count, 1])
  support_means = tf.stop_gradient(tf.tile(mean_predictions, multiples=[1, model_count, 1]))

  support_predictions = tf.reshape(support_predictions, shape=[-1, model_count * vocab_size])
  support_labels = tf.reshape(support_labels, shape=[-1, model_count * vocab_size])
  support_means = tf.reshape(support_means, shape=[-1, model_count * vocab_size])

  ce_loss_fn = CrossEntropyLoss()
  # The cross entropy between predictions and ground truth
  cross_entropy_loss = ce_loss_fn.calculate_loss(support_predictions, support_labels, **unused_params)
  # The cross entropy between predictions and mean predictions
  divergence = ce_loss_fn.calculate_loss(support_predictions, support_means, **unused_params)

  loss = cross_entropy_loss * (1.0 - FLAGS.support_loss_percent) - divergence * FLAGS.support_loss_percent
  return loss

def trainable_initial_state(self, batch_size):
    """
    Create a trainable initial state for the BasicLSTMCell
    :param batch_size: number of samples per batch
    :return: LSTMStateTuple
    """
    def _create_initial_state(batch_size, state_size, trainable=True,
                              initializer=tf.random_normal_initializer()):
        with tf.device('/cpu:0'):
            s = tf.get_variable('initial_state', shape=[1, state_size],
                                dtype=tf.float32, trainable=trainable,
                                initializer=initializer)
            state = tf.tile(s, tf.stack([batch_size] + [1]))
        return state

    with tf.variable_scope('initial_c'):
        initial_c = _create_initial_state(batch_size, self._num_units)
    with tf.variable_scope('initial_h'):
        initial_h = _create_initial_state(batch_size, self._num_units)
    return tf.contrib.rnn.LSTMStateTuple(initial_c, initial_h)

def _create_decoder(self, encoder_output, features, _labels):
  attention_class = locate(self.params["attention.class"]) or \
      getattr(decoders.attention, self.params["attention.class"])
  attention_layer = attention_class(
      params=self.params["attention.params"], mode=self.mode)

  # If the input sequence is reversed we also need to reverse
  # the attention scores.
  reverse_scores_lengths = None
  if self.params["source.reverse"]:
    reverse_scores_lengths = features["source_len"]
    if self.use_beam_search:
      reverse_scores_lengths = tf.tile(
          input=reverse_scores_lengths,
          multiples=[self.params["inference.beam_search.beam_width"]])

  return self.decoder_class(
      params=self.params["decoder.params"],
      mode=self.mode,
      vocab_size=self.target_vocab_info.total_size,
      attention_values=encoder_output.attention_values,
      attention_values_length=encoder_output.attention_values_length,
      attention_keys=encoder_output.outputs,
      attention_fn=attention_layer,
      reverse_scores_lengths=reverse_scores_lengths)

def _validate(self, machine, n=10):
    N = n * n

    # same row same z
    z = tf.random_normal(shape=[n, self.arch['z_dim']])
    z = tf.tile(z, [1, n])
    z = tf.reshape(z, [N, -1])
    z = tf.Variable(z, trainable=False, dtype=tf.float32)

    # same column same y
    y = tf.range(0, 10, 1, dtype=tf.int64)
    y = tf.reshape(y, [-1, 1])
    y = tf.tile(y, [n, 1])

    Xh = machine.generate(z, y)  # 100, 64, 64, 3
    # Xh = gray2jet(Xh)
    # Xh = make_png_thumbnail(Xh, n)
    Xh = make_png_jet_thumbnail(Xh, n)
    return Xh

def _validate(self, machine, n=10):
    N = n * n

    # same row same z
    z = tf.random_normal(shape=[n, self.arch['z_dim']])
    z = tf.tile(z, [1, n])
    z = tf.reshape(z, [N, -1])
    z = tf.Variable(z, trainable=False, dtype=tf.float32)

    # same column same y
    y = tf.range(0, 10, 1, dtype=tf.int64)
    y = tf.reshape(y, [-1,])
    y = tf.tile(y, [n,])

    Xh = machine.generate(z, y)  # 100, 64, 64, 3
    Xh = make_png_thumbnail(Xh, n)
    return Xh

def get_image(filepath, image_target, image_size):
    img = imread(filepath).astype(np.float)
    h_origin, w_origin = img.shape[:2]

    if image_target > h_origin or image_target > w_origin:
        image_target = min(h_origin, w_origin)

    h_drop = int((h_origin - image_target) / 2)
    w_drop = int((w_origin - image_target) / 2)

    if img.ndim == 2:
        img = np.tile(img.reshape(h_origin, w_origin, 1), (1, 1, 3))

    img_crop = img[h_drop:h_drop + image_target, w_drop:w_drop + image_target, :]
    img_resize = imresize(img_crop, [image_size, image_size])

    return np.array(img_resize) / 127.5 - 1.

def conv_cond_concat(x, y):
    """Concatenate conditioning vector on feature map axis."""
    #print('input x:', x.get_shape().as_list())
    #print('input y:', y.get_shape().as_list())
    xshape = x.get_shape()
    #tile by [1, 64, 64, 1]
    tile_shape = tf.stack([1, xshape[1], xshape[2], 1])
    tile_y = tf.tile(y, tile_shape)
    #print('tile y:', tile_y.get_shape().as_list())
    return tf.concat([x, tile_y], axis=3)

    #x_shapes = x.get_shape()
    #y_shapes = y.get_shape()
    #return tf.concat([
    #    x, y*tf.ones([x_shapes[0], x_shapes[1], x_shapes[2], y_shapes[3]])], 3)

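The pattern above is common in conditional GANs: a conditioning vector of shape [N, 1, 1, C] is tiled across the spatial dimensions so it can be concatenated onto every position of the feature map. A minimal shape walkthrough, with hypothetical sizes:

import tensorflow as tf

x = tf.zeros([8, 64, 64, 3])          # feature map:  [N, H, W, C_x]
y = tf.ones([8, 1, 1, 10])            # condition:    [N, 1, 1, C_y]
tile_y = tf.tile(y, [1, 64, 64, 1])   # -> [8, 64, 64, 10]
out = tf.concat([x, tile_y], axis=3)  # -> [8, 64, 64, 13]
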
def rnn_story(self):
    """
    run rnn for story to get last hidden state
    input is: story: [batch_size, story_length, embed_size]
    :return: last hidden state. [batch_size, embed_size]
    """
    # 1. split input to get lists.
    # a list of length story_length; each element is [batch_size, 1, embed_size]
    input_split = tf.split(self.story_embedding, self.story_length, axis=1)
    # a list of length story_length; each element is [batch_size, embed_size]
    input_list = [tf.squeeze(x, axis=1) for x in input_split]
    # 2. init keys (w_all) and values (h_all) of memory
    h_all = tf.get_variable("hidden_states", shape=[self.block_size, self.dimension],
                            initializer=self.initializer)  # [block_size, hidden_size]
    w_all = tf.get_variable("keys", shape=[self.block_size, self.dimension],
                            initializer=self.initializer)  # [block_size, hidden_size]
    # 3. expand keys and values to prepare operation of rnn
    w_all_expand = tf.tile(tf.expand_dims(w_all, axis=0), [self.batch_size, 1, 1])  # [batch_size, block_size, hidden_size]
    h_all_expand = tf.tile(tf.expand_dims(h_all, axis=0), [self.batch_size, 1, 1])  # [batch_size, block_size, hidden_size]
    # 4. run rnn using input with cell.
    for i, input in enumerate(input_list):
        # w_all: [batch_size, block_size, hidden_size]; h_all: [batch_size, block_size, hidden_size]
        h_all_expand = self.cell(input, h_all_expand, w_all_expand, i)
    return h_all_expand  # [batch_size, block_size, hidden_size]

def SoftArgmin(outputLeft, outputRight, D=192):
    left_result_D = outputLeft
    right_result_D = outputRight
    left_result_D_squeeze = tf.squeeze(left_result_D, axis=[0, 4])
    right_result_D_squeeze = tf.squeeze(right_result_D, axis=[0, 4])  # 192 256 512
    left_result_softmax = tf.nn.softmax(left_result_D_squeeze, dim=0)
    right_result_softmax = tf.nn.softmax(right_result_D_squeeze, dim=0)  # 192 256 512

    d_grid = tf.cast(tf.range(D), tf.float32)
    d_grid = tf.reshape(d_grid, (-1, 1, 1))
    d_grid = tf.tile(d_grid, [1, 256, 512])

    left_softargmin = tf.reduce_sum(tf.multiply(left_result_softmax, d_grid), axis=0, keep_dims=True)
    right_softargmin = tf.reduce_sum(tf.multiply(right_result_softmax, d_grid), axis=0, keep_dims=True)

    return left_softargmin, right_softargmin

def _attention(self, prev_decoder_state, prev_embedding):
    with tf.variable_scope('attention') as scope:
        # e = score of shape [batch_size, output_seq_length, input_seq_length], e_{ij} = score(s_{i-1}, h_j)
        # e_i = score of shape [batch_size, input_seq_length], e_ij = score(prev_decoder_state, h_j)
        e_i = self._score(prev_decoder_state, prev_embedding)

        # alpha_i = softmax(e_i) of shape [batch_size, input_seq_length]
        alpha_i = tf.nn.softmax(e_i)

        resized_alpha_i = tf.reshape(tf.tile(alpha_i, [1, self.encoder_output_size]),
                                     [self.batch_size, -1, self.encoder_output_size])

        if self.mode == 'decode':
            c_i = tf.reduce_sum(tf.multiply(resized_alpha_i, self.pre_computed_encoder_states_placeholder), axis=1)
        else:
            c_i = tf.reduce_sum(tf.multiply(resized_alpha_i, self.encoder_outputs), axis=1)

        return c_i, e_i

def ar_layer(z0, hps, n_hidden=10):
    ''' old iaf layer '''
    # Repeat input
    z_rep = tf.reshape(tf.tile(z0, [1, hps.z_size]), [-1, hps.z_size])

    # make mask
    mask = tf.sequence_mask(tf.range(hps.z_size), hps.z_size)[None, :, :]
    mask = tf.reshape(tf.tile(mask, [tf.shape(z0)[0], 1, 1]), [-1, hps.z_size])

    # predict mu and sigma
    z_mask = z_rep * tf.to_float(mask)
    mid = slim.fully_connected(z_mask, n_hidden, activation_fn=tf.nn.relu)
    pars = slim.fully_connected(mid, 2, activation_fn=None)
    pars = tf.reshape(pars, [-1, hps.z_size, 2])
    mu, log_sigma = tf.unstack(pars, axis=2)
    return mu, log_sigma

def kSparse(self, x, topk):
    print 'run regular k-sparse'
    dim = int(x.get_shape()[1])
    if topk > dim:
        warnings.warn('Warning: topk should not be larger than dim: %s, found: %s, using %s' % (dim, topk, dim))
        topk = dim

    k = dim - topk
    values, indices = tf.nn.top_k(-x, k)  # indices will be [[0, 1], [2, 1]], values will be [[6., 2.], [5., 4.]]

    # We need to create full indices like [[0, 0], [0, 1], [1, 2], [1, 1]]
    my_range = tf.expand_dims(tf.range(0, tf.shape(indices)[0]), 1)  # will be [[0], [1]]
    my_range_repeated = tf.tile(my_range, [1, k])  # will be [[0, 0], [1, 1]]

    # change shapes to [N, k, 1] and [N, k, 1], to concatenate into [N, k, 2]
    full_indices = tf.stack([my_range_repeated, indices], axis=2)
    full_indices = tf.reshape(full_indices, [-1, 2])

    to_reset = tf.sparse_to_dense(full_indices, tf.shape(x), tf.reshape(values, [-1]),
                                  default_value=0., validate_indices=False)

    res = tf.add(x, to_reset)

    return res

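The index construction in kSparse is worth isolating: tf.tile() expands a column of row ids so that each row id pairs with each of that row's top-k column indices, yielding the [row, col] pairs that tf.sparse_to_dense expects. A standalone sketch using the toy values from the comments above:

import tensorflow as tf

indices = tf.constant([[0, 1], [2, 1]])                          # per-row top-k column indices, k=2
my_range = tf.expand_dims(tf.range(0, tf.shape(indices)[0]), 1)  # [[0], [1]]
my_range_repeated = tf.tile(my_range, [1, 2])                    # [[0, 0], [1, 1]]
full_indices = tf.stack([my_range_repeated, indices], axis=2)    # shape [N, k, 2]
full_indices = tf.reshape(full_indices, [-1, 2])
# full_indices evaluates to [[0, 0], [0, 1], [1, 2], [1, 1]]
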
def update_link_matrix(self, link_matrix_old, precedence_weighting_old, write_weighting):
    """
    Updating the link matrix takes some effort (in order to vectorize the implementation)
    Instead of the original index-by-index operation, it's all done at once.

    :param link_matrix_old: from previous time step, shape [batch_size, memory_size, memory_size]
    :param precedence_weighting_old: from previous time step, shape [batch_size, memory_size]
    :param write_weighting: from current time step, shape [batch_size, memory_size]
    :return: updated link matrix
    """
    expanded = tf.expand_dims(write_weighting, axis=2)

    # vectorizing the paper's original implementation
    w = tf.tile(expanded, [1, 1, self.memory_size])  # shape [batch_size, memory_size, memory_size]
    # shape of w_transpose is the same: [batch_size, memory_size, memory_size]
    w_transp = tf.tile(tf.transpose(expanded, [0, 2, 1]), [1, self.memory_size, 1])

    # in einsum, m and n are the same dimension because tensorflow doesn't support duplicated subscripts. Why?
    lm = (1 - w - w_transp) * link_matrix_old + tf.einsum("bn,bm->bmn", precedence_weighting_old, write_weighting)
    lm *= (1 - tf.eye(self.memory_size, batch_shape=[self.batch_size]))  # making sure self links are off
    return tf.identity(lm, name="Link_matrix")

def __init__(self, directory, num_act, mean_path, num_threads=1, capacity=1e5, batch_size=32,
             scale=(1.0/255.0), s_t_shape=[84, 84, 4], x_t_1_shape=[84, 84, 1], colorspace='gray'):
    self.scale = scale
    self.s_t_shape = s_t_shape
    self.x_t_1_shape = x_t_1_shape

    # Load image mean
    mean = np.load(os.path.join(mean_path))

    # Prepare data flow
    s_t, a_t, x_t_1 = _read_and_decode(directory,
                                       s_t_shape=s_t_shape,
                                       num_act=num_act,
                                       x_t_1_shape=x_t_1_shape)
    self.mean = mean
    self.s_t_batch, self.a_t_batch, self.x_t_1_batch = tf.train.shuffle_batch(
        [s_t, a_t, x_t_1],
        batch_size=batch_size,
        capacity=capacity,
        min_after_dequeue=int(capacity * 0.25),
        num_threads=num_threads)

    # Subtract image mean (according to J Oh design)
    self.mean_const = tf.constant(mean, dtype=tf.float32)
    print(self.mean_const.get_shape())
    self.s_t_batch = (self.s_t_batch - tf.tile(self.mean_const, [1, 1, 4])) * scale
    self.x_t_1_batch = (self.x_t_1_batch - self.mean_const) * scale

def distance_biases(time_steps, window_size=10, reuse=False):
    """
    Return a 2-d tensor with the values of the distance biases to be applied
    on the intra-attention matrix of size sentence_size

    Args:
        time_steps: tensor scalar
        window_size: window size
        reuse: reuse variables

    Returns:
        2-d tensor (time_steps, time_steps)
    """
    with tf.variable_scope('distance-bias', reuse=reuse):
        # this is d_{i-j}
        distance_bias = tf.get_variable('dist_bias', [window_size],
                                        initializer=tf.zeros_initializer())
        r = tf.range(0, time_steps)
        r_matrix = tf.tile(tf.reshape(r, [1, -1]), tf.stack([time_steps, 1]))
        raw_idxs = r_matrix - tf.reshape(r, [-1, 1])
        clipped_idxs = tf.clip_by_value(raw_idxs, 0, window_size - 1)
        values = tf.nn.embedding_lookup(distance_bias, clipped_idxs)
    return values

def decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id,
                         end_of_sequence_id, max_target_sequence_length,
                         vocab_size, output_layer, batch_size, keep_prob):
    start_tokens = tf.tile(
        tf.constant([start_of_sequence_id], dtype=tf.int32),
        [batch_size],
        name='start_tokens')

    # Define the helper
    helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(
        dec_embeddings, start_tokens, end_of_sequence_id)

    # Define the decoder
    decoder = tf.contrib.seq2seq.BasicDecoder(
        dec_cell, helper, encoder_state, output_layer)

    # Run the decoder
    infer_decoder_output, _ = tf.contrib.seq2seq.dynamic_decode(
        decoder, impute_finished=True,
        maximum_iterations=max_target_sequence_length)

    return infer_decoder_output

def __call__(self, u_t, a, b, scope=None):
    """
    :param u_t: [N, M, d]
    :param a: [N, M, 1]
    :param b: [N, M, 1]
    :param mask: [N, M]
    :return:
    """
    N, M, d = self.batch_size, self.mem_size, self.hidden_size
    L, sL = self.L, self.sL
    with tf.name_scope(scope or self.__class__.__name__):
        L = tf.tile(tf.expand_dims(L, 0), [N, 1, 1])
        sL = tf.tile(tf.expand_dims(sL, 0), [N, 1, 1])
        logb = tf.log(b + 1e-9)
        logb = tf.concat(1, [tf.zeros([N, 1, 1]), tf.slice(logb, [0, 1, 0], [-1, -1, -1])])
        left = L * tf.exp(tf.batch_matmul(L, logb * sL))  # [N, M, M]
        right = a * u_t  # [N, M, d]
        u = tf.batch_matmul(left, right)  # [N, M, d]
    return u

def __call__(self, u_t, a, b, scope=None):
    """
    :param u_t: [N, M, d]
    :param a: [N, M, d]
    :param b: [N, M, d]
    :param mask: [N, M]
    :return:
    """
    N, M, d = self.batch_size, self.mem_size, self.hidden_size
    L, sL = self.L, self.sL
    with tf.name_scope(scope or self.__class__.__name__):
        L = tf.tile(tf.expand_dims(tf.expand_dims(L, 0), 0), [N, d, 1, 1])
        sL = tf.tile(tf.expand_dims(tf.expand_dims(sL, 0), 0), [N, d, 1, 1])
        logb = tf.log(b + 1e-9)  # [N, M, d]
        logb = tf.concat(1, [tf.zeros([N, 1, d]), tf.slice(logb, [0, 1, 0], [-1, -1, -1])])  # [N, M, d]
        logb = tf.expand_dims(tf.transpose(logb, [0, 2, 1]), -1)  # [N, d, M, 1]
        left = L * tf.exp(tf.batch_matmul(L, logb * sL))  # [N, d, M, M]
        right = a * u_t  # [N, M, d]
        right = tf.expand_dims(tf.transpose(right, [0, 2, 1]), -1)  # [N, d, M, 1]
        u = tf.batch_matmul(left, right)  # [N, d, M, 1]
        u = tf.transpose(tf.squeeze(u, [3]), [0, 2, 1])  # [N, M, d]
    return u

def get_output_for(self, input, **kwargs):
    input_shape = tf.shape(input)
    n_batches = input_shape[0]
    n_steps = input_shape[1]
    input = tf.reshape(input, tf.pack([n_batches, n_steps, -1]))
    if 'recurrent_state' in kwargs and self in kwargs['recurrent_state']:
        h0s = kwargs['recurrent_state'][self]
    else:
        h0s = tf.tile(
            tf.reshape(self.h0, (1, self.num_units)),
            (n_batches, 1)
        )
    # flatten extra dimensions
    shuffled_input = tf.transpose(input, (1, 0, 2))
    hs = tf.scan(
        self.step,
        elems=shuffled_input,
        initializer=h0s
    )
    shuffled_hs = tf.transpose(hs, (1, 0, 2))
    if 'recurrent_state_output' in kwargs:
        kwargs['recurrent_state_output'][self] = shuffled_hs
    return shuffled_hs

def get_output_for(self, input, **kwargs):
    input_shape = tf.shape(input)
    n_batches = input_shape[0]
    n_steps = input_shape[1]
    input = tf.reshape(input, tf.pack([n_batches, n_steps, -1]))
    c0s = tf.tile(
        tf.reshape(self.c0, (1, self.num_units)),
        (n_batches, 1)
    )
    h0s = self.nonlinearity(c0s)
    # flatten extra dimensions
    shuffled_input = tf.transpose(input, (1, 0, 2))
    hcs = tf.scan(
        self.step,
        elems=shuffled_input,
        initializer=tf.concat(1, [h0s, c0s])
    )
    shuffled_hcs = tf.transpose(hcs, (1, 0, 2))
    shuffled_hs = shuffled_hcs[:, :, :self.num_units]
    shuffled_cs = shuffled_hcs[:, :, self.num_units:]
    return shuffled_hs

def create(self, args):
    self.inputsa = hg.inputs.image_loader.ImageLoader(args.batch_size)
    self.inputsa.create(args.directory,
                        channels=channels,
                        format=args.format,
                        crop=args.crop,
                        width=width,
                        height=height,
                        resize=True)

    xa = self.inputsa.x
    xb = tf.tile(tf.image.rgb_to_grayscale(xa), [1, 1, 1, 3])

    self.xa = xa
    self.x = xa  # TODO remove
    self.xb = xb

def inner_tile(tensor, shape, freq):
    """Tile the tensor's gathered inner region across the output at the given frequency."""

    if isinstance(freq, int):
        freq = freq_for_shape(freq, shape)

    small_shape = [int(shape[0] / freq[0]), int(shape[1] / freq[1]), shape[2]]

    y_index = tf.tile(column_index(small_shape) * freq[0], [freq[0], freq[0]])
    x_index = tf.tile(row_index(small_shape) * freq[1], [freq[0], freq[0]])

    tiled = tf.gather_nd(tensor, tf.stack([y_index, x_index], 2))

    tiled = resample(tiled, shape, spline_order=1)

    return tiled

def row_index(shape):
    """
    Generate an X index for the given tensor.

    .. code-block:: python

      [
        [ 0, 1, 2, ... width-1 ],
        [ 0, 1, 2, ... width-1 ],
        ... (x height)
      ]

    :param list[int] shape:
    :return: Tensor
    """

    height = shape[0]
    width = shape[1]

    row_identity = tf.cumsum(tf.ones([width], dtype=tf.int32), exclusive=True)
    row_identity = tf.reshape(tf.tile(row_identity, [height]), [height, width])

    return row_identity

def align(hid_align, h_dec, scope):
    h_dec_align = linear3(h_dec, dim_align, "h_dec_align_" + scope)  # batch_size x dim_align
    h_dec_align = tf.reshape(h_dec_align, [batch_size, 1, dim_align])
    h_dec_align_tiled = tf.tile(h_dec_align, [1, sentence_length, 1])
    all_align = tf.tanh(h_dec_align + hid_align)

    with tf.variable_scope("v_align_" + scope, reuse=DO_SHARE):
        v_align = tf.get_variable("v_align_" + scope, [dim_align],
                                  initializer=tf.constant_initializer(0.0))
    e_t = all_align * v_align
    e_t = tf.reduce_sum(e_t, 2)

    # normalise
    alpha = tf.nn.softmax(e_t)  # batch_size x sentence_length
    alpha_t = tf.reshape(alpha, [batch_size, sentence_length, 1])
    alpha_tile = tf.tile(alpha_t, [1, 1, 2 * y_enc_size])

    s_t = tf.multiply(alpha_tile, h_t_lang)
    s_t = tf.reduce_sum(s_t, 1)

    return s_t, alpha

def run_lstm(self, encoded_rep, q_rep, masks):
    encoded_question, encoded_passage = encoded_rep
    masks_question, masks_passage = masks

    q_rep = tf.expand_dims(q_rep, 1)  # (batch_size, 1, D)
    encoded_passage_shape = tf.shape(encoded_passage)[1]
    q_rep = tf.tile(q_rep, [1, encoded_passage_shape, 1])

    mixed_question_passage_rep = tf.concat([encoded_passage, q_rep], axis=-1)

    with tf.variable_scope("lstm_"):
        cell = tf.contrib.rnn.BasicLSTMCell(self.hidden_size, state_is_tuple=True)
        reverse_mixed_question_passage_rep = _reverse(mixed_question_passage_rep, masks_passage, 1, 0)

        output_attender_fw, _ = tf.nn.dynamic_rnn(cell, mixed_question_passage_rep,
                                                  dtype=tf.float32, scope="rnn")
        output_attender_bw, _ = tf.nn.dynamic_rnn(cell, reverse_mixed_question_passage_rep,
                                                  dtype=tf.float32, scope="rnn")

        output_attender_bw = _reverse(output_attender_bw, masks_passage, 1, 0)

    output_attender = tf.concat([output_attender_fw, output_attender_bw], axis=-1)  # (-1, P, 2*H)
    return output_attender

def __init__(self, input_shape, control_points_ratio):
    self.num_batch = input_shape[0]
    self.depth = input_shape[1]
    self.height = input_shape[2]
    self.width = input_shape[3]
    self.num_channels = input_shape[4]
    self.out_height = self.height
    self.out_width = self.width
    self.out_depth = self.depth
    self.X_controlP_number = int(input_shape[3] / control_points_ratio)
    self.Y_controlP_number = int(input_shape[2] / control_points_ratio)
    self.Z_controlP_number = int(input_shape[1] / control_points_ratio)
    init_x = np.linspace(-5, 5, self.X_controlP_number)
    init_y = np.linspace(-5, 5, self.Y_controlP_number)
    init_z = np.linspace(-5, 5, self.Z_controlP_number)
    x_s = np.tile(init_x, [self.Y_controlP_number * self.Z_controlP_number])
    y_s = np.tile(np.repeat(init_y, self.X_controlP_number), [self.Z_controlP_number])
    z_s = np.repeat(init_z, self.X_controlP_number * self.Y_controlP_number)
    self.initial = np.array([x_s, y_s, z_s])

def _local_Networks(self, input_dim, x):
    with tf.variable_scope('_local_Networks'):
        x = tf.reshape(x, [-1, self.height * self.width * self.depth * self.num_channels])
        W_fc_loc1 = weight_variable([self.height * self.width * self.depth * self.num_channels, 20])
        b_fc_loc1 = bias_variable([20])
        W_fc_loc2 = weight_variable([20, self.X_controlP_number * self.Y_controlP_number * self.Z_controlP_number * 3])
        initial = self.initial.astype('float32')
        initial = initial.flatten()
        b_fc_loc2 = tf.Variable(initial_value=initial, name='b_fc_loc2')
        h_fc_loc1 = tf.nn.tanh(tf.matmul(x, W_fc_loc1) + b_fc_loc1)
        h_fc_loc2 = tf.nn.tanh(tf.matmul(h_fc_loc1, W_fc_loc2) + b_fc_loc2)
        # temp use
        if Debug == True:
            x = np.linspace(-1.0, 1.0, self.X_controlP_number)
            y = np.linspace(-1.0, 1.0, self.Y_controlP_number)
            z = np.linspace(-1.0, 1.0, self.Z_controlP_number)
            x_s = tf.tile(x, [self.Y_controlP_number * self.Z_controlP_number], 'float64')
            y_s = tf.tile(self._repeat(y, self.X_controlP_number, 'float64'), [self.Z_controlP_number])
            z_s = self._repeat(z, self.X_controlP_number * self.Y_controlP_number, 'float64')
            h_fc_loc2 = tf.concat([x_s, y_s, z_s], 0)
            h_fc_loc2 = tf.tile(h_fc_loc2, [self.num_batch])
            h_fc_loc2 = tf.reshape(h_fc_loc2, [self.num_batch, -1])  # 2*(4*4*4)*3 -> (2, 192)
        return h_fc_loc2

def __init__(self):
    self._num_classes = cfg.NUM_CLASSES
    self._batch_size = cfg.TRAIN.BATCH_SIZE
    self._latent_size = 128
    self._hidden_size = 256

    self._x_labeled = tf.placeholder(tf.float32, shape=[self._batch_size, 28, 28, 1])
    self._x_unlabeled = tf.placeholder(tf.float32, shape=[self._batch_size, 28, 28, 1])
    self._x = tf.concat([self._x_labeled, self._x_unlabeled], 0)
    self._y_labeled = tf.placeholder(tf.float32, shape=[self._batch_size, self._num_classes])
    self._y_all, self.y_unlabeled = self.generate_y(self._y_labeled)

    self._losses = {}
    self._initializer = self.define_initializer()

    self._blocks_encoder = [resnet_utils.Block('block4', bottleneck, [(256, 128, 1)] * 3)]
    self._blocks_decoder_valid = [resnet_utils.Block('block5', bottleneck_trans_valid,
                                                     [(256, 128, 1), (256, 128, 2)])]
    self._blocks_decoder_same = [resnet_utils.Block('block5', bottleneck_trans_same,
                                                    [(256, 128, 2), (256, 128, 2)])]
    self._resnet_scope = 'resnet_v1_%d' % 101

    x_unlabeled_tiled = tf.tile(self._x_unlabeled, [self._num_classes, 1, 1, 1])  # (100, 256) --> (2100, 256)
    self.outputs = {'labeled': {'x_in': self._x_labeled},
                    'unlabeled': {'x_in': x_unlabeled_tiled}}

def encoder(self, x):
    with tf.variable_scope('encoder'):
        net = resnet_utils.conv2d_same(x, 64, 7, stride=2, scope='conv1')
        net = tf.pad(net, [[0, 0], [1, 1], [1, 1], [0, 0]])
        x = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID', scope='pool1')
        x_features_all, _ = resnet_v1.resnet_v1(x,
                                                self._blocks_encoder,
                                                global_pool=False,
                                                include_root_block=False,
                                                scope=self._resnet_scope)
        x_features_all = tf.reduce_mean(x_features_all, axis=[1, 2])
        x_features_labeled, x_features_unlabeled = tf.split(x_features_all, 2)
        x_features_tiled = tf.tile(x_features_unlabeled, [self._num_classes, 1])  # (100, 256) --> (2100, 256)
        x_features = tf.concat([x_features_labeled, x_features_tiled], 0)  # (2100, 256) --> (2200, 256)
    return x_features

def __init__(self, dims, multiples, name="tile_by_dim"):
  """Constructs the `TileByDim` module.

  Args:
    dims: The dimensions to tile along, as a list of unique integers.
    multiples: The multiple of the tiling, as a list of integers. Must
        be the same length as the `dims` list.
    name: The name of the module.

  Raises:
    ValueError: If `dims` has non-unique integers, or if the size of
        `multiples` is different from the size of `dims`.
  """
  super(TileByDim, self).__init__(name=name)
  self._dims = dims
  self._multiples = multiples

  if np.unique(dims).size != len(dims):
    raise ValueError("dims must not have any repeated integers.")

  if len(multiples) != len(dims):
    raise ValueError(
        "multiples must have the same length as dims: {}.".format(len(dims)))

def _build(self, inputs):
  """Connects the `TileByDim` module into the graph.

  Args:
    inputs: `Tensor` to tile.

  Returns:
    The tiled tensor.
  """
  shape_inputs = inputs.get_shape().as_list()
  rank = len(shape_inputs)

  # Builds default lists for multiples to pass to `tf.tile`.
  full_multiples = [1] * rank

  # Updates lists with what the user provided.
  for dim, multiple in zip(self._dims, self._multiples):
    full_multiples[dim] = multiple

  return tf.tile(inputs, multiples=full_multiples)

def testComparison(self):
  # Here we compare the output with the `tf.tile` equivalent.
  in_shape = [2, 3, 4]
  inputs = tf.random_uniform(shape=in_shape)
  dims = [0, 2]
  multiples = [2, 4]
  mod = snt.TileByDim(dims=dims, multiples=multiples)
  output = mod(inputs)

  multiple_tf = [2, 1, 4]
  ref_output = tf.tile(inputs, multiples=multiple_tf)

  with self.test_session() as sess:
    actual, expected = sess.run([output, ref_output])
    self.assertAllEqual(actual, expected)

def initialize(self, name=None):
    finished = tf.tile([False], [self.config.beam_width])
    start_tokens_batch = tf.fill([self.config.beam_width], self.start_tokens)
    first_inputs = tf.nn.embedding_lookup(self.target_embedding, start_tokens_batch)
    first_inputs = tf.expand_dims(first_inputs, 1)
    zeros_padding = tf.zeros([self.config.beam_width,
                              self.params['max_decode_length'] - 1,
                              self.target_embedding.get_shape().as_list()[-1]])
    first_inputs = tf.concat([first_inputs, zeros_padding], axis=1)

    outputs = tf.tile(self.initial_state.outputs, [self.config.beam_width, 1, 1])
    attention_values = tf.tile(self.initial_state.attention_values, [self.config.beam_width, 1, 1])
    enc_output = EncoderOutput(
        outputs=outputs,
        final_state=self.initial_state.final_state,
        attention_values=attention_values,
        attention_values_length=self.initial_state.attention_values_length)

    return finished, first_inputs, enc_output

def initialize(self, name=None):
    finished = tf.tile([False], [self.config.beam_width])
    start_tokens_batch = tf.fill([self.config.beam_width], self.start_tokens)
    first_inputs = tf.nn.embedding_lookup(self.target_embedding, start_tokens_batch)
    first_inputs = tf.expand_dims(first_inputs, 1)
    zeros_padding = tf.zeros([self.config.beam_width,
                              self.params['max_decode_length'] - 1,
                              self.target_embedding.get_shape().as_list()[-1]])
    first_inputs = tf.concat([first_inputs, zeros_padding], axis=1)
    beam_state = beam_search.create_initial_beam_state(self.config)

    outputs = tf.tile(self.initial_state.outputs, [self.config.beam_width, 1, 1])
    attention_values = tf.tile(self.initial_state.attention_values, [self.config.beam_width, 1, 1])
    enc_output = EncoderOutput(
        outputs=outputs,
        final_state=self.initial_state.final_state,
        attention_values=attention_values,
        attention_values_length=self.initial_state.attention_values_length)

    return finished, first_inputs, (enc_output, beam_state)

def __call__(self, inputs, state, scope=None):
    """
    :param inputs: [N*B, I + B]
    :param state: [N*B, d]
    :param scope:
    :return: [N*B, d]
    """
    with tf.variable_scope(scope or self.__class__.__name__):
        d = self.state_size
        x = tf.slice(inputs, [0, 0], [-1, self._input_size])  # [N*B, I]
        mask = tf.slice(inputs, [0, self._input_size], [-1, -1])  # [N*B, B]
        B = tf.shape(mask)[1]
        prev_state = tf.expand_dims(tf.reshape(state, [-1, B, d]), 1)  # [N, B, d] -> [N, 1, B, d]
        mask = tf.tile(tf.expand_dims(tf.reshape(mask, [-1, B, B]), -1), [1, 1, 1, d])  # [N, B, B, d]
        # prev_state = self._reduce_func(tf.tile(prev_state, [1, B, 1, 1]), 2)
        prev_state = self._reduce_func(exp_mask(prev_state, mask), 2)  # [N, B, d]
        prev_state = tf.reshape(prev_state, [-1, d])  # [N*B, d]
        return self._cell(x, prev_state)

def __call__(self, inputs, state, scope=None):
    """
    :param inputs: [N, d + JQ + JQ * d]
    :param state: [N, d]
    :param scope:
    :return:
    """
    with tf.variable_scope(scope or self.__class__.__name__):
        c_prev, h_prev = state
        x = tf.slice(inputs, [0, 0], [-1, self._input_size])
        q_mask = tf.slice(inputs, [0, self._input_size], [-1, self._q_len])  # [N, JQ]
        qs = tf.slice(inputs, [0, self._input_size + self._q_len], [-1, -1])
        qs = tf.reshape(qs, [-1, self._q_len, self._input_size])  # [N, JQ, d]
        x_tiled = tf.tile(tf.expand_dims(x, 1), [1, self._q_len, 1])  # [N, JQ, d]
        h_prev_tiled = tf.tile(tf.expand_dims(h_prev, 1), [1, self._q_len, 1])  # [N, JQ, d]
        f = tf.tanh(linear([qs, x_tiled, h_prev_tiled], self._input_size, True, scope='f'))  # [N, JQ, d]
        a = tf.nn.softmax(exp_mask(linear(f, 1, True, squeeze=True, scope='a'), q_mask))  # [N, JQ]
        q = tf.reduce_sum(qs * tf.expand_dims(a, -1), 1)
        z = tf.concat(1, [x, q])  # [N, 2d]
        return self._cell(z, state)

def get_double_linear_controller(size, bias, input_keep_prob=1.0, is_train=None):
    def double_linear_controller(inputs, state, memory):
        """
        :param inputs: [N, i]
        :param state: [N, d]
        :param memory: [N, M, m]
        :return: [N, M]
        """
        rank = len(memory.get_shape())
        _memory_size = tf.shape(memory)[rank - 2]
        tiled_inputs = tf.tile(tf.expand_dims(inputs, 1), [1, _memory_size, 1])
        if isinstance(state, tuple):
            tiled_states = [tf.tile(tf.expand_dims(each, 1), [1, _memory_size, 1])
                            for each in state]
        else:
            tiled_states = [tf.tile(tf.expand_dims(state, 1), [1, _memory_size, 1])]

        # [N, M, d]
        in_ = tf.concat(2, [tiled_inputs] + tiled_states + [memory])
        out = double_linear_logits(in_, size, bias, input_keep_prob=input_keep_prob,
                                   is_train=is_train)
        return out
    return double_linear_controller

def get_linear_controller(bias, input_keep_prob=1.0, is_train=None):
    def linear_controller(inputs, state, memory):
        rank = len(memory.get_shape())
        _memory_size = tf.shape(memory)[rank - 2]
        tiled_inputs = tf.tile(tf.expand_dims(inputs, 1), [1, _memory_size, 1])
        if isinstance(state, tuple):
            tiled_states = [tf.tile(tf.expand_dims(each, 1), [1, _memory_size, 1])
                            for each in state]
        else:
            tiled_states = [tf.tile(tf.expand_dims(state, 1), [1, _memory_size, 1])]

        # [N, M, d]
        in_ = tf.concat(2, [tiled_inputs] + tiled_states + [memory])
        out = linear(in_, 1, bias, squeeze=True, input_keep_prob=input_keep_prob,
                     is_train=is_train)
        return out
    return linear_controller

def get_output_for(self, input, **kwargs):
    input_shape = tf.shape(input)
    n_batches = input_shape[0]
    n_steps = input_shape[1]
    input = tf.reshape(input, tf.stack([n_batches, n_steps, -1]))
    if 'recurrent_state' in kwargs and self in kwargs['recurrent_state']:
        h0s = kwargs['recurrent_state'][self]
    else:
        h0s = tf.tile(
            tf.reshape(self.h0, (1, self.num_units)),
            (n_batches, 1)
        )
    # flatten extra dimensions
    shuffled_input = tf.transpose(input, (1, 0, 2))
    hs = tf.scan(
        self.step,
        elems=shuffled_input,
        initializer=h0s
    )
    shuffled_hs = tf.transpose(hs, (1, 0, 2))
    if 'recurrent_state_output' in kwargs:
        kwargs['recurrent_state_output'][self] = shuffled_hs
    return shuffled_hs