The following are 50 code examples, extracted from open-source Python projects, that illustrate how to use tensorflow.placeholder().
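As a quick orientation before the project-specific examples: a placeholder is a graph node that holds no value at construction time and must be fed through feed_dict when the graph is run. Below is a minimal sketch of that pattern (TensorFlow 1.x API; the names and shapes are illustrative and not drawn from any of the projects below).

import tensorflow as tf

# A batch of 3-dimensional float vectors; the None dimension leaves
# the batch size unspecified until values are fed at run time.
x = tf.placeholder(tf.float32, shape=(None, 3), name="x")
y = tf.reduce_sum(x, axis=1)

with tf.Session() as sess:
    # Placeholders receive concrete values only through feed_dict;
    # running y without feeding x would raise an InvalidArgumentError.
    print(sess.run(y, feed_dict={x: [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]}))  # [ 6. 15.]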
def _test_metric_spec(self, metric_spec, hyps, refs, expected_scores):
    """Tests a MetricSpec"""
    predictions = {"predicted_tokens": tf.placeholder(dtype=tf.string)}
    labels = {"target_tokens": tf.placeholder(dtype=tf.string)}

    value, update_op = metric_spec.create_metric_ops(None, labels, predictions)

    with self.test_session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())

        scores = []
        for hyp, ref in zip(hyps, refs):
            hyp = hyp.split(" ")
            ref = ref.split(" ")
            sess.run(update_op, {
                predictions["predicted_tokens"]: [hyp],
                labels["target_tokens"]: [ref]
            })
            scores.append(sess.run(value))

        for score, expected in zip(scores, expected_scores):
            np.testing.assert_almost_equal(score, expected, decimal=2)

def _load_data_graph(self):
    """
    Loads the data graph consisting of the encoder and decoder input placeholders,
    Label (Target tip summary) placeholders and the weights of the hidden layer of
    the Seq2Seq model.

    :return: None
    """
    # input
    with tf.variable_scope("train_test", reuse=True):
        # review input - Both original and reversed
        self.enc_inp_fwd = [tf.placeholder(tf.int32, shape=(None,), name="input%i" % t)
                            for t in range(self.seq_length)]
        self.enc_inp_bwd = [tf.placeholder(tf.int32, shape=(None,), name="input%i" % t)
                            for t in range(self.seq_length)]
        # desired output
        self.labels = [tf.placeholder(tf.int32, shape=(None,), name="labels%i" % t)
                       for t in range(self.seq_length)]
        # weight of the hidden layer
        self.weights = [tf.ones_like(labels_t, dtype=tf.float32)
                        for labels_t in self.labels]
        # Decoder input: prepend some "GO" token and drop the final
        # token of the encoder input
        self.dec_inp = ([tf.zeros_like(self.labels[0], dtype=np.int32, name="GO")]
                        + self.labels[:-1])

def _load_data_graph(self):
    """
    Loads the data graph consisting of the encoder and decoder input placeholders,
    Label (Target tip summary) placeholders and the weights of the hidden layer of
    the Seq2Seq model.

    :return: None
    """
    # input
    with tf.variable_scope("train_test", reuse=True):
        self.enc_inp = [tf.placeholder(tf.int32, shape=(None,), name="input%i" % t)
                        for t in range(self.seq_length)]
        # desired output
        self.labels = [tf.placeholder(tf.int32, shape=(None,), name="labels%i" % t)
                       for t in range(self.seq_length)]
        # weight of the hidden layer
        self.weights = [tf.ones_like(labels_t, dtype=tf.float32)
                        for labels_t in self.labels]
        # Decoder input: prepend some "GO" token and drop the final
        # token of the encoder input
        self.dec_inp = ([tf.zeros_like(self.labels[0], dtype=np.int32, name="GO")]
                        + self.labels[:-1])

def build_model(self):
    self.q = tf.placeholder(tf.float32, [self.reader.vocab_size], name="question")
    self.a = tf.placeholder(tf.float32, [self.reader.vocab_size], name="answer")

    self.build_encoder()
    self.build_decoder()

    # Kullback-Leibler divergence
    self.e_loss = -0.5 * tf.reduce_sum(1 + self.log_sigma_sq
                                       - tf.square(self.mu) - tf.exp(self.log_sigma_sq))
    # Log likelihood
    self.g_loss = tf.reduce_sum(tf.log(self.p_x_i))

    self.loss = tf.reduce_mean(self.e_loss + self.g_loss)
    self.optim = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(-self.loss)

    _ = tf.scalar_summary("encoder loss", self.e_loss)
    _ = tf.scalar_summary("decoder loss", self.g_loss)
    _ = tf.scalar_summary("loss", self.loss)

def __init__(self, files_list, thread_count, batch_size, numcep, numcontext,
             next_index=lambda x: x + 1):
    self._coord = None
    self._numcep = numcep
    self._x = tf.placeholder(tf.float32, [None, numcep + (2 * numcep * numcontext)])
    self._x_length = tf.placeholder(tf.int32, [])
    self._y = tf.placeholder(tf.int32, [None,])
    self._y_length = tf.placeholder(tf.int32, [])
    self.example_queue = tf.PaddingFIFOQueue(
        shapes=[[None, numcep + (2 * numcep * numcontext)], [], [None,], []],
        dtypes=[tf.float32, tf.int32, tf.int32, tf.int32],
        capacity=2 * self._get_device_count() * batch_size)
    self._enqueue_op = self.example_queue.enqueue(
        [self._x, self._x_length, self._y, self._y_length])
    self._close_op = self.example_queue.close(cancel_pending_enqueues=True)
    self.batch_size = batch_size
    self._numcontext = numcontext
    self._thread_count = thread_count
    self._files_list = self._create_files_list(files_list)
    self._next_index = next_index

def get_video_weights(video_id_batch):
    video_id_to_index = tf.contrib.lookup.string_to_index_table_from_file(
        vocabulary_file=FLAGS.sample_vocab_file, default_value=0)
    indexes = video_id_to_index.lookup(video_id_batch)

    weights, length = get_video_weights_array()
    weights_input = tf.placeholder(tf.float32, shape=[length], name="sample_weights_input")
    weights_tensor = tf.get_variable("sample_weights",
                                     shape=[length],
                                     trainable=False,
                                     dtype=tf.float32,
                                     initializer=tf.constant_initializer(weights))
    weights_assignment = tf.assign(weights_tensor, weights_input)

    tf.add_to_collection("weights_input", weights_input)
    tf.add_to_collection("weights_assignment", weights_assignment)

    video_weight_batch = tf.nn.embedding_lookup(weights_tensor, indexes)
    return video_weight_batch

def __init__(self, ob_space, ac_space, layers=[256], **kwargs):
    self.x = x = tf.placeholder(tf.float32, [None] + list(ob_space))

    rank = len(ob_space)
    if rank == 3:  # pixel input
        for i in range(4):
            x = tf.nn.elu(conv2d(x, 32, "c{}".format(i + 1), [3, 3], [2, 2]))
    elif rank == 1:  # plain features
        # x = tf.nn.elu(linear(x, 256, "l1", normalized_columns_initializer(0.01)))
        pass
    else:
        raise TypeError("observation space must have rank 1 or 3, got %d" % rank)

    x = flatten(x)
    for i, layer in enumerate(layers):
        x = tf.nn.elu(linear(x, layer, "l{}".format(i + 1),
                             tf.contrib.layers.xavier_initializer()))

    self.logits = linear(x, ac_space, "action", tf.contrib.layers.xavier_initializer())
    self.vf = tf.reshape(linear(x, 1, "value", tf.contrib.layers.xavier_initializer()), [-1])
    self.sample = categorical_sample(self.logits, ac_space)[0, :]
    self.var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                      tf.get_variable_scope().name)
    self.state_in = []

def __init__(self, ob_space, ac_space, size=256, **kwargs):
    self.x = x = tf.placeholder(tf.float32, [None] + list(ob_space))

    for i in range(4):
        x = tf.nn.elu(conv2d(x, 32, "l{}".format(i + 1), [3, 3], [2, 2]))
    # introduce a "fake" batch dimension of 1 after flatten so that we can do GRU over time dim
    x = tf.expand_dims(flatten(x), 1)

    gru = rnn.GRUCell(size)

    h_init = np.zeros((1, size), np.float32)
    self.state_init = [h_init]
    h_in = tf.placeholder(tf.float32, [1, size])
    self.state_in = [h_in]

    gru_outputs, gru_state = tf.nn.dynamic_rnn(
        gru, x, initial_state=h_in, sequence_length=[size], time_major=True)
    x = tf.reshape(gru_outputs, [-1, size])

    self.logits = linear(x, ac_space, "action", normalized_columns_initializer(0.01))
    self.vf = tf.reshape(linear(x, 1, "value", normalized_columns_initializer(1.0)), [-1])
    self.state_out = [gru_state[:1]]
    self.sample = categorical_sample(self.logits, ac_space)[0, :]
    self.var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                      tf.get_variable_scope().name)

def __init__(self, shape, name=None):
    """Takes input in uint8 format which is cast to float32 and divided by 255
    before passing it to the model. On GPU this ensures lower data transfer times.

    Parameters
    ----------
    shape: [int]
        shape of the tensor.
    name: str
        name of the underlying placeholder
    """
    super().__init__(tf.placeholder(tf.uint8, [None] + list(shape), name=name))
    self._shape = shape
    self._output = tf.cast(super().get(), tf.float32) / 255.0

def test_qrnn_linear_forward(self):
    batch_size = 100
    sentence_length = 5
    word_size = 10
    size = 5
    data = self.create_test_data(batch_size, sentence_length, word_size)

    with tf.Graph().as_default() as q_linear:
        qrnn = QRNN(in_size=word_size, size=size, conv_size=1)
        X = tf.placeholder(tf.float32, [batch_size, sentence_length, word_size])
        forward_graph = qrnn.forward(X)

        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            hidden = sess.run(forward_graph, feed_dict={X: data})
            self.assertEqual((batch_size, size), hidden.shape)

def test_qrnn_with_previous(self):
    batch_size = 100
    sentence_length = 5
    word_size = 10
    size = 5
    data = self.create_test_data(batch_size, sentence_length, word_size)

    with tf.Graph().as_default() as q_with_previous:
        qrnn = QRNN(in_size=word_size, size=size, conv_size=2)
        X = tf.placeholder(tf.float32, [batch_size, sentence_length, word_size])
        forward_graph = qrnn.forward(X)

        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            hidden = sess.run(forward_graph, feed_dict={X: data})
            self.assertEqual((batch_size, size), hidden.shape)

def test_qrnn_convolution(self):
    batch_size = 100
    sentence_length = 5
    word_size = 10
    size = 5
    data = self.create_test_data(batch_size, sentence_length, word_size)

    with tf.Graph().as_default() as q_conv:
        qrnn = QRNN(in_size=word_size, size=size, conv_size=3)
        X = tf.placeholder(tf.float32, [batch_size, sentence_length, word_size])
        forward_graph = qrnn.forward(X)

        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            hidden = sess.run(forward_graph, feed_dict={X: data})
            self.assertEqual((batch_size, size), hidden.shape)

def __init__(self, check_):
    self.img_feed = tf.placeholder(tf.float32)
    self.output_logits = tf.nn.softmax(
        models.foodv_test(
            self.img_feed,
            reg_val=0.0,
            is_train=False,
            dropout_p=1.0))
    self.sess = tf.Session()
    self.checkpoint_name = check_
    saver = tf.train.Saver()
    print("loading model...")
    saver.restore(self.sess, self.checkpoint_name)
    print("Model loaded !")

def __init__(self, embedding):
    self.sess = tf.Session()
    self.inputs = tf.placeholder(tf.float32, [None, embedding.shape[1]],
                                 name='inputs')
    self.test_vec = tf.placeholder(tf.float32, [1, embedding.shape[1]],
                                   name='test_vec')
    self.cos_distance = tf.matmul(self.inputs, tf.transpose(self.test_vec))

    #-----------------------------------------------------------------------
    # Compute normalized embedding matrix
    #-----------------------------------------------------------------------
    row_sum = tf.reduce_sum(tf.square(self.inputs), axis=1, keep_dims=True)
    norm = tf.sqrt(row_sum)
    self.normalized = self.inputs / norm
    self.embedding = self.sess.run(self.normalized,
                                   feed_dict={self.inputs: embedding})

#---------------------------------------------------------------------------

def __init__(self, rnd_vec_dim, hidden_units, output_dim, alpha):
    #-----------------------------------------------------------------------
    # Inputs
    #-----------------------------------------------------------------------
    self.inputs_rnd = tf.placeholder(tf.float32, (None, rnd_vec_dim),
                                     name='inputs_rnd')

    #-----------------------------------------------------------------------
    # The generator
    #-----------------------------------------------------------------------
    self.alpha = alpha
    with tf.variable_scope('generator'):
        h1 = tf.layers.dense(self.inputs_rnd, hidden_units, activation=None)
        h1 = LeakyReLU(h1, self.alpha)
        self.gen_logits = tf.layers.dense(h1, output_dim, activation=None)
        self.gen_out = tf.tanh(self.gen_logits)

#---------------------------------------------------------------------------

def __init__(self, name='model', sess=None):
    assert sess is not None
    self.name = name
    self.sess = sess

    self.x = tf.placeholder(tf.float32, [None, img_dim[0], img_dim[1], img_dim[2]],
                            name='Observation')
    self.y = tf.placeholder(tf.float32, [None, n_action], name='Steer')

    self._build_net(True, False)
    self._build_net(False, True)
    self._define_train_ops()

    tl.layers.initialize_global_variables(self.sess)

    print()
    self.n_test.print_layers()
    print()
    self.n_test.print_params(False)
    print()
    # exit()

def build_model(self):
    Z = tf.placeholder(tf.float32, [self.batch_size, self.dim_z])
    Y = tf.placeholder(tf.float32, [self.batch_size, self.dim_y])
    image_real = tf.placeholder(tf.float32, [self.batch_size] + self.image_shape)

    h4 = self.generate(Z, Y)
    # image_gen comes from sigmoid output of generator
    image_gen = tf.nn.sigmoid(h4)

    raw_real2 = self.discriminate(image_real, Y)
    # p_real = tf.nn.sigmoid(raw_real)
    p_real = tf.reduce_mean(raw_real2)

    raw_gen2 = self.discriminate(image_gen, Y)
    # p_gen = tf.nn.sigmoid(raw_gen)
    p_gen = tf.reduce_mean(raw_gen2)

    discrim_cost = tf.reduce_sum(raw_real2) - tf.reduce_sum(raw_gen2)
    gen_cost = -tf.reduce_mean(raw_gen2)

    return Z, Y, image_real, discrim_cost, gen_cost, p_real, p_gen

def samples_generator(self, batch_size):
    Z = tf.placeholder(tf.float32, [batch_size, self.dim_z])
    Y = tf.placeholder(tf.float32, [batch_size, self.dim_y])

    yb = tf.reshape(Y, [batch_size, 1, 1, self.dim_y])
    Z_ = tf.concat([Z, Y], 1)
    h1 = tf.nn.relu(batchnormalize(tf.matmul(Z_, self.gen_W1)))
    h1 = tf.concat([h1, Y], 1)
    h2 = tf.nn.relu(batchnormalize(tf.matmul(h1, self.gen_W2)))
    h2 = tf.reshape(h2, [batch_size, 6, 6, self.dim_W2])
    h2 = tf.concat([h2, yb * tf.ones([batch_size, 6, 6, self.dim_y])], 3)

    output_shape_l3 = [batch_size, 12, 12, self.dim_W3]
    h3 = tf.nn.conv2d_transpose(h2, self.gen_W3, output_shape=output_shape_l3,
                                strides=[1, 2, 2, 1])
    h3 = tf.nn.relu(batchnormalize(h3))
    h3 = tf.concat([h3, yb * tf.ones([batch_size, 12, 12, self.dim_y])], 3)

    output_shape_l4 = [batch_size, 24, 24, self.dim_channel]
    h4 = tf.nn.conv2d_transpose(h3, self.gen_W4, output_shape=output_shape_l4,
                                strides=[1, 2, 2, 1])
    x = tf.nn.sigmoid(h4)
    return Z, Y, x

def __init__(self, channels=3, n_class=2, cost="cross_entropy", cost_kwargs={}, **kwargs):
    tf.reset_default_graph()

    self.n_class = n_class
    self.summaries = kwargs.get("summaries", True)

    self.x = tf.placeholder("float", shape=[None, None, None, channels])
    self.y = tf.placeholder("float", shape=[None, None, None, n_class])
    self.keep_prob = tf.placeholder(tf.float32)  # dropout (keep probability)

    logits, self.variables, self.offset = create_conv_net(self.x, self.keep_prob,
                                                          channels, n_class, **kwargs)

    self.cost = self._get_cost(logits, cost, cost_kwargs)

    self.gradients_node = tf.gradients(self.cost, self.variables)

    self.cross_entropy = tf.reduce_mean(
        cross_entropy(tf.reshape(self.y, [-1, n_class]),
                      tf.reshape(pixel_wise_softmax_2(logits), [-1, n_class])))

    self.predicter = pixel_wise_softmax_2(logits)
    self.correct_pred = tf.equal(tf.argmax(self.predicter, 3), tf.argmax(self.y, 3))
    self.accuracy = tf.reduce_mean(tf.cast(self.correct_pred, tf.float32))

def __init__(self, name, size_input__layer, size_hidden_layer, size_output_layer,
             l2_coeff, keep_prob, optimizer='SGD'):
    """Make new tensors and connect them."""
    self.size_input__layer = size_input__layer
    self.size_hidden_layer = size_hidden_layer
    self.size_output_layer = size_output_layer
    self.keep_prob = keep_prob

    self.input__placeholder = tf.placeholder(tf.float32,
                                             shape=(None, size_input__layer))
    self.answer_placeholder = tf.placeholder(tf.float32,
                                             shape=(None, size_output_layer))
    self.learning_rate = tf.placeholder(tf.float32)

    self.inference_proc, l2_proc = self.inference(self.input__placeholder, name)
    self.loss_proc = NN.loss(self.inference_proc, l2_proc, l2_coeff,
                             self.answer_placeholder)
    self.training_proc = NN.training(self.loss_proc, self.learning_rate, optimizer)

def _build_graph(self, image_size):
    self.image_size = image_size
    self.images = tf.placeholder(tf.float32,
                                 shape=(None, image_size, image_size, 3))
    images_mini = tf.image.resize_images(self.images,
                                         size=(int(image_size / 4),
                                               int(image_size / 4)))
    self.images_blur = tf.image.resize_images(images_mini,
                                              size=(image_size, image_size))

    self.net = U_Net(output_ch=3, block_fn='origin')
    self.images_reconst = self.net(self.images_blur, reuse=False)
    # self.images_reconst can take values in [-inf, +inf], so clip its values
    # before visualizing them as images.
    self.loss = tf.reduce_mean((self.images_reconst - self.images)**2)
    self.opt = tf.train.AdamOptimizer()\
                       .minimize(self.loss, var_list=self.net.vars)

    self.saver = tf.train.Saver()
    self.sess.run(tf.global_variables_initializer())

def predictPL(self):
    B = self.flags.batch_size
    W, H, C = self.flags.width, self.flags.height, self.flags.color
    inputs = tf.placeholder(dtype=tf.float32, shape=[None, H, W, C])

    #with open(self.flags.pred_path, 'w') as f:
    #    pass

    self._build(inputs, resize=False)
    counter = 0
    with tf.Session() as sess:
        self.sess = sess
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        for imgs, imgnames in self.DATA.test_generator():
            pred = sess.run(self.logit, feed_dict={inputs: imgs})
            np.save("%s/%d.npy" % (self.flags.pred_path, counter),
                    {"pred": pred, "name": imgnames})
            counter += len(imgs)
            if counter / B % 10 == 0:
                print_mem_time("%d images predicted" % counter)

# train with placeholders

def _build(self):
    V = self.V
    M = self.flags.embedding_size  # 64
    H = self.flags.num_units
    C = self.flags.classes
    D = self.flags.d2v_size  # embedding for d2v

    netname = "D2V"
    with tf.variable_scope(netname):
        self.inputs = tf.placeholder(dtype=tf.int32, shape=[None])  # [B]
        layer_name = "{}/embedding".format(netname)
        x = self._get_embedding(layer_name, self.inputs, V, D, reuse=False)  # [B, S, M]

    netname = "NN"
    cell_name = self.flags.cell
    H1, H2 = 32, 16
    with tf.variable_scope(netname):
        net = self._fc(x, fan_in=D, fan_out=H1, layer_name="%s/fc1" % netname,
                       activation='relu')
        net = self._dropout(net)
        net = self._fc(net, fan_in=H1, fan_out=H2, layer_name="%s/fc2" % netname,
                       activation='relu')
        net = self._dropout(net)
        net = self._fc(net, fan_in=H2, fan_out=C, layer_name="%s/fc3" % netname,
                       activation=None)
        self.logit = net

def _build(self):
    netname = "CBOW"
    W = self.flags.window_size
    M = self.flags.embedding_size
    V = self.V  # vocabulary size, should be passed from DB
    H = 128
    # the real window is W*2 + 1
    with tf.variable_scope(netname):
        self.inputs = tf.placeholder(tf.int32, shape=(None, W*2+1))  # [B, W*2+1]
        layer_name = "{}/embedding".format(netname)
        x = self._get_embedding(layer_name, self.inputs, V, M, reuse=False)  # [B, W*2+1, M]
        x = tf.reshape(x, [tf.shape(x)[0], tf.shape(x)[1] * tf.shape(x)[2]])  # [B, (W*2+1)*M]
        layer_name = "{}/fc1".format(netname)
        net = self._fc(x, fan_in=M*(W*2+1), fan_out=H, layer_name=layer_name,
                       activation='relu')  # [B, H]
        layer_name = "{}/fc2".format(netname)
        net = self._fc(net, fan_in=H, fan_out=2, layer_name=layer_name,
                       activation=None)  # [B, 2]
        self.logit = net

def rnn_extend(sess, coder, inputs, skips=None, length=1):
    """
    inputs is batch_size x n_consecutive_seqs x n_visible x seq_length.
    """
    shape = inputs.shape
    n_seqs = shape[1]
    batch = tf.placeholder(coder.dtype, name='batch_seq',
                           shape=(shape[0], shape[2], shape[3]))
    coder.reset_state()
    output = coder.recode(batch, store=True, skips=skips)
    outputs = []
    for index in range(n_seqs):
        batch_seq = inputs[:, index, :, :]
        o, s = sess.run([output, coder.get_state()],
                        feed_dict={batch: batch_seq})
        outputs.append(o)
    outputs.append(coder.predict_sequence(None, s['hidden'],
                                          length=length * shape[3]).eval())
    return np.array(outputs)

def __init__(self, sigma=0.1, beta_sampling=True, **kwargs):
    """
    sigma: Standard deviation of input data, for use in sampling.

    beta_sampling: Use beta distribution for sampling, instead of Gaussian.
    """
    RBM.__init__(self, **kwargs)
    if not kwargs.get('fromfile'):
        self.sigma = sigma
        self.beta_sampling = beta_sampling

    if self.sigma is None:
        raise AssertionError('Need to supply sigma param.')

    self.hidden = tf.placeholder(self.dtype, name='hidden',
                                 shape=[None, self.n_hidden])
    self.mean_v = tf.sigmoid(tf.matmul(self.hidden, self.params['W'],
                                       transpose_b=True)
                             + self.params['bvis'])

def init():
    # 1. assign value to fields
    vocab_size = 1000
    d_model = 512
    d_k = 64
    d_v = 64
    sequence_length = 5 * 10
    h = 8
    batch_size = 4 * 32
    initializer = tf.random_normal_initializer(stddev=0.1)

    # 2. set values for Q, K, V
    vocab_size = 1000
    embed_size = d_model
    Embedding = tf.get_variable("Embedding_E", shape=[vocab_size, embed_size],
                                initializer=initializer)
    input_x = tf.placeholder(tf.int32, [batch_size, sequence_length],
                             name="input_x")  # [batch_size, sequence_length]
    print("input_x:", input_x)
    embedded_words = tf.nn.embedding_lookup(Embedding, input_x)  # [batch_size, sequence_length, embed_size]
    Q = embedded_words   # [batch_size, sequence_length, embed_size]
    K_s = embedded_words  # [batch_size, sequence_length, embed_size]
    num_layer = 6
    mask = get_mask(batch_size, sequence_length)

    # 3. get class object
    encoder_class = Encoder(d_model, d_k, d_v, sequence_length, h, batch_size,
                            num_layer, Q, K_s, mask=mask)
    return encoder_class, Q, K_s

def create_critic_net(self, num_states=4, num_actions=1):
    N_HIDDEN_1 = 400
    N_HIDDEN_2 = 300
    critic_state_in = tf.placeholder("float", [None, num_states])
    critic_action_in = tf.placeholder("float", [None, num_actions])

    W1_c = tf.Variable(tf.random_uniform([num_states, N_HIDDEN_1],
                                         -1 / math.sqrt(num_states),
                                         1 / math.sqrt(num_states)))
    B1_c = tf.Variable(tf.random_uniform([N_HIDDEN_1],
                                         -1 / math.sqrt(num_states),
                                         1 / math.sqrt(num_states)))
    W2_c = tf.Variable(tf.random_uniform([N_HIDDEN_1, N_HIDDEN_2],
                                         -1 / math.sqrt(N_HIDDEN_1 + num_actions),
                                         1 / math.sqrt(N_HIDDEN_1 + num_actions)))
    W2_action_c = tf.Variable(tf.random_uniform([num_actions, N_HIDDEN_2],
                                                -1 / math.sqrt(N_HIDDEN_1 + num_actions),
                                                1 / math.sqrt(N_HIDDEN_1 + num_actions)))
    B2_c = tf.Variable(tf.random_uniform([N_HIDDEN_2],
                                         -1 / math.sqrt(N_HIDDEN_1 + num_actions),
                                         1 / math.sqrt(N_HIDDEN_1 + num_actions)))
    W3_c = tf.Variable(tf.random_uniform([N_HIDDEN_2, 1], -0.003, 0.003))
    B3_c = tf.Variable(tf.random_uniform([1], -0.003, 0.003))

    H1_c = tf.nn.softplus(tf.matmul(critic_state_in, W1_c) + B1_c)
    H2_c = tf.nn.tanh(tf.matmul(H1_c, W2_c)
                      + tf.matmul(critic_action_in, W2_action_c) + B2_c)
    critic_q_model = tf.matmul(H2_c, W3_c) + B3_c

    return W1_c, B1_c, W2_c, W2_action_c, B2_c, W3_c, B3_c, critic_q_model, critic_state_in, critic_action_in

def create_actor_net(self, num_states=4, num_actions=1):
    """Network that takes states and returns action"""
    N_HIDDEN_1 = 400
    N_HIDDEN_2 = 300
    actor_state_in = tf.placeholder("float", [None, num_states])
    W1_a = tf.Variable(tf.random_uniform([num_states, N_HIDDEN_1],
                                         -1 / math.sqrt(num_states),
                                         1 / math.sqrt(num_states)))
    B1_a = tf.Variable(tf.random_uniform([N_HIDDEN_1],
                                         -1 / math.sqrt(num_states),
                                         1 / math.sqrt(num_states)))
    W2_a = tf.Variable(tf.random_uniform([N_HIDDEN_1, N_HIDDEN_2],
                                         -1 / math.sqrt(N_HIDDEN_1),
                                         1 / math.sqrt(N_HIDDEN_1)))
    B2_a = tf.Variable(tf.random_uniform([N_HIDDEN_2],
                                         -1 / math.sqrt(N_HIDDEN_1),
                                         1 / math.sqrt(N_HIDDEN_1)))
    W3_a = tf.Variable(tf.random_uniform([N_HIDDEN_2, num_actions], -0.003, 0.003))
    B3_a = tf.Variable(tf.random_uniform([num_actions], -0.003, 0.003))

    H1_a = tf.nn.softplus(tf.matmul(actor_state_in, W1_a) + B1_a)
    H2_a = tf.nn.tanh(tf.matmul(H1_a, W2_a) + B2_a)
    actor_model = tf.matmul(H2_a, W3_a) + B3_a
    return W1_a, B1_a, W2_a, B2_a, W3_a, B3_a, actor_state_in, actor_model

def placeholder_inputs(batch_size):
    """Generate placeholder variables to represent the input tensors.

    These placeholders are used as inputs by the rest of the model building
    code and will be fed from the downloaded data in the .run() loop, below.

    Args:
        batch_size: The batch size will be baked into both placeholders.

    Returns:
        images_placeholder: Images placeholder.
        labels_placeholder: Labels placeholder.
    """
    # Note that the shapes of the placeholders match the shapes of the full
    # image and label tensors, except the first dimension is now batch_size
    # rather than the full size of the train or test data sets.
    images_placeholder = tf.placeholder(tf.float32,
                                        shape=(batch_size,
                                               c3d_model.NUM_FRAMES_PER_CLIP,
                                               c3d_model.CROP_SIZE,
                                               c3d_model.CROP_SIZE,
                                               c3d_model.CHANNELS))
    labels_placeholder = tf.placeholder(tf.int64, shape=(batch_size))
    return images_placeholder, labels_placeholder

def test_vgg():
    vgg = Vgg16()
    image_tensor = tf.placeholder(tf.float32)
    with tf.Session() as sess:
        vgg.build(image_tensor)
        init = tf.initialize_all_variables()
        sess.run(init)
        load_feature_layer_params(
            '/Users/dtong/code/data/tf-image-interpreter/pretrain/vgg16_weights.npz', sess)
        for v in tf.get_collection(tf.GraphKeys.VARIABLES):
            print_op = tf.Print(v, [v], message=v.name, first_n=10)
            sess.run(print_op)

        roidb = RoiDb('val.txt', 2007)
        batch_gen = BatchGenerator(roidb)

        for i in range(10):
            image, scale, bboxes = batch_gen.next_batch()
            print(sess.run(vgg.conv5_3, feed_dict={image_tensor: image}))

def main():
    roidb = RoiDb('val.txt', 2007)
    batch_gen = BatchGenerator(roidb)

    image_tensor = tf.placeholder(dtype=tf.float32)
    scale_tensor = tf.placeholder(dtype=tf.float32)
    bboxes_tensor = tf.placeholder(dtype=tf.float32)
    p_op = tf.Print(image_tensor, [tf.shape(image_tensor), scale_tensor, bboxes_tensor])

    sess = tf.Session()
    init = tf.initialize_all_variables()
    sess.run(init)

    coord = tf.train.Coordinator()
    queue_threads = queue_runner.start_queue_runners(sess, coord=coord)

    for i in range(10):
        if coord.should_stop():
            break
        image, scale, bboxes = batch_gen.next_batch()

        sess.run([p_op], feed_dict={image_tensor: image,
                                    scale_tensor: scale,
                                    bboxes_tensor: bboxes})

    coord.request_stop()
    coord.join(queue_threads)

def test_rpn():
    vgg = Vgg16()
    rpn = RpnNet()
    image_tensor = tf.placeholder(tf.float32)
    with tf.Session() as sess:
        vgg.build(image_tensor)
        rpn.build(vgg.conv5_3, None)
        init = tf.initialize_all_variables()
        sess.run(init)
        load_feature_layer_params(
            '/Users/dtong/code/data/tf-image-interpreter/pretrain/vgg16_weights.npz', sess)

        roidb = RoiDb('val.txt', 2007)
        batch_gen = BatchGenerator(roidb)

        for i in range(10):
            image, scale, bboxes = batch_gen.next_batch()
            feature_shape = tf.shape(rpn.rpn_cls_score_reshape)
            print_feat_shape = tf.Print(feature_shape, [feature_shape], summarize=5)
            sess.run(print_feat_shape, feed_dict={image_tensor: image})
            # print(sess.run(vgg.conv5_3, feed_dict={image_tensor: image}))

def __init__(self):
    # Initializes function that decodes RGB png data.
    self._decode_png_data = tf.placeholder(dtype=tf.string)
    self._decode_png = tf.image.decode_png(self._decode_png_data, channels=3)

def initialize(self):
    with tf.variable_scope(self.name):
        self.keepProb = tf.placeholder('float')  # Variable to hold the dropout probability

def make_skipgram_softmax_loss(embeddings_matrix, vocabulary_size, vector_size):
    vectors = tf.get_variable('vectors', (vocabulary_size, vector_size), dtype=tf.float32,
                              initializer=tf.constant_initializer(embeddings_matrix))
    minibatch = tf.placeholder(shape=(None, 2), dtype=tf.int32)

    center_word_vector = tf.nn.embedding_lookup(vectors, minibatch[:, 0])
    yhat = tf.matmul(center_word_vector, vectors, transpose_b=True)

    predict_word = minibatch[:, 1]
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=predict_word, logits=yhat)
    loss = tf.reduce_mean(loss)
    return vectors, minibatch, loss

def add_output_placeholders(self):
    self.top_placeholder = tf.placeholder(tf.int32, shape=(None,))
    self.special_label_placeholder = tf.placeholder(tf.int32,
                                                    shape=(None, MAX_SPECIAL_LENGTH))
    self.part_function_placeholders = dict()
    self.part_sequence_placeholders = dict()
    self.part_sequence_length_placeholders = dict()
    for part in ('trigger', 'query', 'action'):
        self.part_function_placeholders[part] = tf.placeholder(tf.int32, shape=(None,))
        self.part_sequence_placeholders[part] = tf.placeholder(tf.int32,
                                                               shape=(None, MAX_PRIMITIVE_LENGTH))
        self.part_sequence_length_placeholders[part] = tf.placeholder(tf.int32,
                                                                      shape=(None,))

def add_input_placeholders(self):
    self.input_placeholder = tf.placeholder(tf.int32,
                                            shape=(None, self.config.max_length))
    self.input_length_placeholder = tf.placeholder(tf.int32, shape=(None,))
    self.constituency_parse_placeholder = tf.placeholder(tf.bool,
                                                         shape=(None, 2 * self.config.max_length - 1))

def add_output_placeholders(self):
    self.output_placeholder = tf.placeholder(tf.int32,
                                             shape=(None, self.config.max_length))
    self.output_length_placeholder = tf.placeholder(tf.int32, shape=(None,))

def add_extra_placeholders(self):
    self.batch_number_placeholder = tf.placeholder(tf.int32, shape=())
    self.dropout_placeholder = tf.placeholder(tf.float32, shape=())