The following are 50 code examples, extracted from open-source Python projects, showing how to use tensorflow.global_variables().

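Before the project examples, here is a minimal self-contained sketch of the API (the variable names are illustrative, not taken from any project below): tf.Variable places each new variable in the GLOBAL_VARIABLES collection of the default graph, and tf.global_variables() returns that collection as a list, which is exactly what tf.train.Saver and tf.variables_initializer accept.

import tensorflow as tf

# Two illustrative variables; tf.Variable adds each one to the
# GLOBAL_VARIABLES collection of the default graph.
w = tf.Variable(tf.zeros([10, 10]), name='w')
b = tf.Variable(tf.zeros([10]), name='b')

# The returned list can be handed directly to a Saver or an initializer.
saver = tf.train.Saver(tf.global_variables())

with tf.Session() as sess:
    sess.run(tf.variables_initializer(tf.global_variables()))
    print([v.name for v in tf.global_variables()])  # ['w:0', 'b:0']
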
def _start_session(self):
    """ Starts the Tensorflow Session

    :return: None
    """
    self.sess.run(tf.global_variables_initializer())
    # initialize the saver node
    # print tf.GraphKeys.GLOBAL_VARIABLES
    self.saver = tf.train.Saver(tf.global_variables())
    # get the latest checkpoint
    last_checkpoint_path = self.checkpointer.get_last_checkpoint()
    if last_checkpoint_path is not None:
        print 'Previous saved tensorflow objects found... Extracting...'
        # restore the tensorflow variables
        self.saver.restore(self.sess, last_checkpoint_path)
        print 'Extraction Complete. Moving Forward....'

def savable_variables(self):
    """Returns a list/dict of savable variables to pass to tf.train.Saver."""
    params = {}
    for v in tf.global_variables():
        assert (v.name.startswith(variable_mgr_util.PS_SHADOW_VAR_PREFIX + '/v0/')
                or v.name in ('global_step:0', 'loss_scale:0',
                              'loss_scale_normal_steps:0')), (
                    'Invalid global variable: %s' % v)
        # We store variables in the checkpoint with the shadow variable prefix
        # removed so we can evaluate checkpoints in non-distributed replicated
        # mode. The checkpoints can also be loaded for training in
        # distributed_replicated mode.
        name = self._strip_port(
            self._remove_shadow_var_prefix_if_present(v.name))
        params[name] = v
    for v in tf.local_variables():
        # Non-trainable variables, such as batch norm moving averages, do not
        # have corresponding global shadow variables, so we add them here.
        # Trainable local variables have corresponding global shadow
        # variables, which were added in the global variable loop above.
        if v.name.startswith('v0/') and v not in tf.trainable_variables():
            params[self._strip_port(v.name)] = v
    return params

def test_remap_var_list(self):
    # Get a test `var_list` {var.name: var}
    var_list = {var.op.name: var for var in tf.global_variables()}

    # Specify mapping from old var names to new ones.
    mapping = {'model_0/Weights': 'model_0/Filters'}
    self.dbinterface.load_param_dict = mapping

    # Perform the mapping.
    mapped_vars = self.dbinterface.remap_var_list(var_list)

    # Confirm that the mapping has been done correctly.
    for name, var in mapped_vars.items():
        self.log.info('{} mapped to {}'.format(name, var.op.name))
        if name == 'model_0/Filters':
            self.assertEqual(name, mapping[var.op.name])

def __init__(self, session, model_scope, result_dir, result_file, k=1):
    """
    Args:
        model_scope: The variable_scope used for the trained model to be
            restored.
        session: The TensorFlow session used to run the prediction.
        result_dir: The full path to the folder in which the result file
            locates.
        result_file: The file that saves the training results.
        k: Optional. Number of elements to be predicted.
    """
    tf.train.import_meta_graph(os.path.join(result_dir, result_file + ".meta"))
    all_vars = tf.global_variables()
    model_vars = [var for var in all_vars if var.name.startswith(model_scope)]
    saver = tf.train.Saver(model_vars)
    saver.restore(session, os.path.join(result_dir, result_file))

    # Retrieve the Ops we 'remembered'.
    logits = tf.get_collection(model_scope + "logits")[0]
    self.images_placeholder = tf.get_collection(model_scope + "images")[0]
    self.keep_prob_placeholder = tf.get_collection(model_scope + "keep_prob")[0]

    # Add an Op that chooses the top k predictions. Apply softmax so that
    # we can have the probabilities (percentage) in the output.
    self.eval_op = tf.nn.top_k(tf.nn.softmax(logits), k=k)
    self.session = session

def tracking(dataset, seq, display, restore_path):
    train_data = reader.read_seq(dataset, seq)
    im_size = proc.load_image(train_data.data[seq].frames[0]).shape[:2]
    config = Config(im_size)

    # create session and saver
    gpu_config = tf.ConfigProto(allow_soft_placement=True)
    sess = tf.InteractiveSession(config=gpu_config)

    # load model, weights
    model = MDNet(config)
    model.build_generator(config.batch_size, reuse=False, dropout=True)
    tf.global_variables_initializer().run()

    # create saver
    saver = tf.train.Saver(
        [v for v in tf.global_variables()
         if ('conv' in v.name or 'fc4' in v.name or 'fc5' in v.name)
         and 'lr_rate' not in v.name],
        max_to_keep=50)

    # restore from model
    saver.restore(sess, restore_path)

    # run mdnet
    mdnet_run(sess, model, train_data.data[seq].gts[0],
              train_data.data[seq].frames, config, display)

def load(self, model_name, verbose=True):
    """Load TensorFlow model from file

    @model_name: save file names
    @verbose: be talkative?
    """
    self.load_info(model_name)
    self._build()
    load_dict = self.save_dict or tf.global_variables()
    saver = tf.train.Saver(load_dict)
    ckpt = tf.train.get_checkpoint_state('./')
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(self.session, ckpt.model_checkpoint_path)
        if verbose:
            print("[{0}] Loaded model <{1}>".format(self.name, model_name))
    else:
        raise Exception("[{0}] No model found at <{1}>".format(
            self.name, model_name))

def initialize_uninitialized_global_variables(sess):
    """
    Only initializes the variables of a TensorFlow session that were not
    already initialized.
    :param sess: the TensorFlow session
    :return:
    """
    # List all global variables
    global_vars = tf.global_variables()

    # Find initialized status for all variables
    is_var_init = [tf.is_variable_initialized(var) for var in global_vars]
    is_initialized = sess.run(is_var_init)

    # List all variables that were not initialized previously
    not_initialized_vars = [var for (var, init) in
                            zip(global_vars, is_initialized) if not init]

    # Initialize all uninitialized variables found, if any
    if len(not_initialized_vars):
        sess.run(tf.variables_initializer(not_initialized_vars))

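A brief usage sketch for the helper above (the checkpoint path and session setup are assumptions for illustration): restoring a checkpoint only initializes the variables stored in it, so the helper is typically called after the graph has been extended, initializing only the genuinely uninitialized variables without clobbering restored weights.

# Hypothetical usage: restore what the checkpoint has, then initialize the rest.
sess = tf.Session()
tf.train.Saver().restore(sess, 'model.ckpt')  # assumed checkpoint path
# ... the graph is later extended with new variables (e.g. optimizer slots) ...
initialize_uninitialized_global_variables(sess)  # touches only the new ones
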
def build_model(self):
    self.model = classmap[FLAGS.model_type](
        hidden_size=FLAGS.hidden,
        vocab_size=self.vocab_size,
        encoder_in_size=self.data.feats.shape[-1],
        encoder_in_length=self.data.feats.shape[1],
        decoder_in_length=self.data.decoder_in.shape[-1] - 1,
        word2vec_weight=self.w2v_W,
        embedding_size=FLAGS.embedding_dim,
        neg_sample_num=self.sample_num,
        start_id=self.vocab_processor._mapping['<BOS>'],
        end_id=self.vocab_processor._mapping['<EOS>'],
        Bk=FLAGS.K)

    self.global_step = tf.Variable(0, name='global_step', trainable=False)
    self.optimizer = tf.train.RMSPropOptimizer(FLAGS.lr)

    tvars = tf.trainable_variables()
    grads, _ = tf.clip_by_global_norm(tf.gradients(self.model.cost, tvars), 5)

    self.updates = self.optimizer.apply_gradients(
        zip(grads, tvars), global_step=self.global_step)

    self.saver = tf.train.Saver(tf.global_variables())

def demo(lr_image, hr_image):
    model_sr = LapSRN(mode='demo')
    hr_images_fake, residuals = model_sr.construct_net(lr_image, hr_image)
    ckpt_path = tf.train.latest_checkpoint('checkpoint')
    print(ckpt_path)
    restorer = tf.train.Saver(tf.global_variables())
    with tf.Session() as sess:
        restorer.restore(sess, ckpt_path)
        hr_image_fake_level_2 = (hr_images_fake['hr_image_fake_level_1']
                                 + residuals['residual_level_1'])
        hr_image_fake_level_2 = tf.clip_by_value(hr_image_fake_level_2, 0, 1)
        hr_image_fake_level_2 = sess.run(hr_image_fake_level_2)
        hr_image_fake_level_2 = hr_image_fake_level_2.squeeze()
        lr_image = sess.run(lr_image)
        lr_image = lr_image.squeeze()
        hr_image = sess.run(hr_image)
        psnr_value = psnr(hr_image.squeeze(), hr_image_fake_level_2.squeeze())
        print(psnr_value)
        imshow(hr_image.squeeze())
        imshow(hr_image_fake_level_2)

def demo(img_path):
    lr_img, hr_img = imgread(img_path)
    model = pix2pix_model(cfg)
    model.test_model(lr_img, hr_img)
    ckpt_path = tf.train.latest_checkpoint('checkpoint')
    restorer = tf.train.Saver(tf.global_variables())
    with tf.Session() as sess:
        restorer.restore(sess, ckpt_path)
        hr_image_fake = model.fake_hr_image
        hr_image_fake = tf.clip_by_value(hr_image_fake, 0, 1)
        hr_image_fake = sess.run(hr_image_fake)
        hr_image_fake = hr_image_fake.squeeze()
        hr_image = sess.run(hr_img)
        psnr_value = psnr(hr_image.squeeze(), hr_image_fake.squeeze())
        print(psnr_value)
        imshow(hr_image_fake)
        imshow(hr_image.squeeze())

def _initialize_session(self):
    """Initialize session, variables, saver"""
    config = tf.ConfigProto()
    # restrict model GPU memory utilization to min required
    config.gpu_options.allow_growth = True
    self.sess = tf.Session(config=config)
    # major.minor of the installed TensorFlow, e.g. 0.10 or 1.2
    tf_version = float('.'.join(tf.__version__.split('.')[:2]))
    if tf_version <= 0.10:
        self.sess.run(tf.initialize_all_variables())
        logswriter = tf.train.SummaryWriter
    else:
        self.sess.run(tf.global_variables_initializer())
        logswriter = tf.summary.FileWriter
    self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=0)
    self.summary_writer = logswriter(self.logs_path, self.sess.graph)

def _initialize_variables():
    if hasattr(tf, 'global_variables'):
        variables = tf.global_variables()
    else:
        variables = tf.all_variables()

    uninitialized_variables = []
    for v in variables:
        if not hasattr(v, '_keras_initialized') or not v._keras_initialized:
            uninitialized_variables.append(v)
            v._keras_initialized = True
    if uninitialized_variables:
        sess = get_session()
        if hasattr(tf, 'variables_initializer'):
            sess.run(tf.variables_initializer(uninitialized_variables))
        else:
            sess.run(tf.initialize_variables(uninitialized_variables))

def to_darknet(self):
    darknet_ckpt = self.darknet

    with self.graph.as_default() as g:
        for var in tf.global_variables():
            name = var.name.split(':')[0]
            var_name = name.split('-')
            l_idx = int(var_name[0])
            w_sig = var_name[1].split('/')[-1]
            l = darknet_ckpt.layers[l_idx]
            l.w[w_sig] = var.eval(self.sess)

    for layer in darknet_ckpt.layers:
        for ph in layer.h:
            layer.h[ph] = None

    return darknet_ckpt

def get_restore_op(self):
    """
    Get variable restoring ngraph op from TF model checkpoint

    Returns:
        A `ng.doall` op that restores the stored weights in TF model
        checkpoint
    """
    if self._graph is None:
        raise ValueError("self._graph is None, import meta_graph first.")
    if self._checkpoint_path is None:
        raise ValueError("self._checkpoint_path is None, please specify "
                         "checkpoint_path while importing meta_graph.")

    with self._graph.as_default():
        tf_variables = tf.global_variables()
        ng_variables = self.get_op_handle(tf_variables)
        ng_restore_ops = []
        with tf.Session() as sess:
            checkpoint_path = os.path.join(os.getcwd(), self._checkpoint_path)
            self.saver.restore(sess, checkpoint_path)
            for tf_variable, ng_variable in zip(tf_variables, ng_variables):
                val = sess.run(tf_variable)
                ng_restore_ops.append(ng.assign(ng_variable, val))
        return ng.doall(ng_restore_ops)

def testCustomGetter(self):
    """Check that custom getters work appropriately."""

    def custom_getter(getter, *args, **kwargs):
        kwargs["trainable"] = False
        return getter(*args, **kwargs)

    inputs = tf.placeholder(tf.float32, shape=[self.batch_size, self.in_size])

    # Make w and b non-trainable.
    lin1 = snt.Linear(output_size=self.out_size,
                      custom_getter=custom_getter)
    lin1(inputs)
    self.assertEqual(0, len(tf.trainable_variables()))
    self.assertEqual(2, len(tf.global_variables()))

    # Make w non-trainable.
    lin2 = snt.Linear(output_size=self.out_size,
                      custom_getter={"w": custom_getter})
    lin2(inputs)
    self.assertEqual(1, len(tf.trainable_variables()))
    self.assertEqual(4, len(tf.global_variables()))

def _get_vars_to_collections(variables):
    """Returns a dict mapping variables to the collections they appear in."""
    var_to_collections = collections.defaultdict(lambda: [])
    if isinstance(variables, dict):
        variables = list(v for _, v in variable_map_items(variables))
    for graph in set(v.graph for v in variables):
        for collection_name in list(graph.collections):
            entries = set(entry for entry in graph.get_collection(collection_name)
                          if isinstance(entry, tf.Variable))
            # For legacy reasons, tf.GraphKeys.GLOBAL_VARIABLES == "variables".
            # Correcting for this here, to avoid confusion.
            if collection_name == tf.GraphKeys.GLOBAL_VARIABLES:
                collection_name = "global_variables"
            for var in entries.intersection(variables):
                var_to_collections[var].append(collection_name)
    return var_to_collections

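A small usage sketch for the helper above (the output shown is approximate; a fresh trainable variable sits in both the global and trainable collections, and the ordering follows graph.collections):

# Hypothetical call; `variable_map_items` from the original module is not shown here.
v = tf.Variable(tf.zeros([3]), name='v')
print(_get_vars_to_collections([v])[v])
# ['global_variables', 'trainable_variables']  (approximate)
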
def initialize_variables(self, save_file=None):
    self.session.run(tf.global_variables_initializer())
    if save_file is not None:
        try:
            self.saver.restore(self.session, save_file)
        except:
            # some wizardry here... basically, only restore variables
            # that are in the save file; otherwise, initialize them normally.
            from tensorflow.python.framework import meta_graph
            meta_graph_def = meta_graph.read_meta_graph_file(save_file + '.meta')
            stored_var_names = set([n.name
                                    for n in meta_graph_def.graph_def.node
                                    if n.op == 'VariableV2'])
            print(stored_var_names)
            var_list = [v for v in tf.global_variables()
                        if v.op.name in stored_var_names]
            # initialize all of the variables
            self.session.run(tf.global_variables_initializer())
            # then overwrite the ones we have in the save file
            # by using a throwaway saver, saved models are automatically
            # "upgraded" to the latest graph definition.
            throwaway_saver = tf.train.Saver(var_list=var_list)
            throwaway_saver.restore(self.session, save_file)

def dump_vars(sess, trainable_scopes=None):
    all_vars = set(tf.global_variables())
    trainable_vars = set(trainable_variables(trainable_scopes))
    non_trainable_vars = all_vars.difference(trainable_vars)

    def _dump_set(var_set):
        names_vars = map(lambda v: (v.name, v), var_set)
        for n, v in sorted(names_vars, key=lambda nv: nv[0]):
            print("%s=%s" % (n, sess.run(v)))

    print("Variable values:")
    print("-----------")
    print("\n---Trainable vars:")
    _dump_set(trainable_vars)
    print("\n---Non Trainable vars:")
    _dump_set(non_trainable_vars)
    print("-----------")

def show_vars(logger=None, trainable_scopes=None):
    printer = logger.info if logger is not None else print
    all_vars = set(tf.global_variables())
    trainable_vars = set(trainable_variables(trainable_scopes))
    non_trainable_vars = all_vars.difference(trainable_vars)
    local_vars = set(tf.local_variables())

    # Note: this snippet targets Python 2; 'nonlocal' is a reserved word in
    # Python 3, where this class definition would be a syntax error.
    class nonlocal:
        pass

    nonlocal.num_params = {}

    def show_var_info(vars, var_type):
        printer('\n---%s vars in model:' % var_type)
        name_shapes = map(lambda v: (v.name, v.get_shape()), vars)
        total_params = 0
        for n, s in sorted(name_shapes, key=lambda ns: ns[0]):
            printer('%s %s' % (n, s))
            total_params += np.prod(s.as_list())
        nonlocal.num_params[var_type] = total_params

    show_var_info(trainable_vars, 'Trainable')
    show_var_info(non_trainable_vars, 'Non Trainable')
    show_var_info(local_vars, 'Local')
    printer('Total number of params:')
    printer(pprint.pformat(nonlocal.num_params))

def save(self, name):
    directory = 'saves/' + name + '/'
    if not os.path.exists(directory):
        os.makedirs(directory)
    directory += 'iteration_{}'.format(self.timestep) + '/'
    if not os.path.exists(directory):
        os.makedirs(directory)

    for i, tensor in enumerate(tf.global_variables()):
        value = self.sess.run(tensor)
        np.save(directory + 'weight_{}'.format(i), value)

    if self.scale:
        np.save(directory + 'sums', self.sums)
        np.save(directory + 'sumsquares', self.sumsqrs)
        np.save(directory + 'sumtime', self.sumtime)

    np.save(directory + 'timestep', np.array([self.timestep]))
    np.save(directory + 'train_scores', np.array(self.train_scores))
    np.save(directory + 'test_scores', np.array(self.test_scores))
    print("Agent successfully saved in folder {}".format(directory))

def load(self, name, iteration=None):
    try:
        directory = 'saves/' + name + '/'
        if not os.path.exists(directory):
            print('That directory does not exist!')
            raise Exception
        if iteration is None:
            iteration = np.max([int(x[10:])
                                for x in [dir for dir in os.walk(directory)][0][1]])
        directory += 'iteration_{}'.format(iteration) + '/'

        for i, tensor in enumerate(tf.global_variables()):
            arr = np.load(directory + 'weight_{}.npy'.format(i))
            self.sess.run(tensor.assign(arr))

        if self.scale:
            self.sums = np.load(directory + 'sums.npy')
            self.sumsqrs = np.load(directory + 'sumsquares.npy')
            self.sumtime = np.load(directory + 'sumtime.npy')

        self.timestep = np.load(directory + 'timestep.npy')[0]
        self.train_scores = np.load(directory + 'train_scores.npy').tolist()
        self.test_scores = np.load(directory + 'test_scores.npy').tolist()
        print("Agent successfully loaded from folder {}".format(directory))
    except:
        print("Something is wrong, loading failed")

def save(self, name):
    directory = 'saves/' + name + '/'
    if not os.path.exists(directory):
        os.makedirs(directory)
    directory += 'iteration_{}'.format(self.timestep) + '/'
    if not os.path.exists(directory):
        os.makedirs(directory)

    for i, w in enumerate(tf.global_variables()):
        np.save(directory + 'weight_{}'.format(i), self.sess.run(w))

    if self.scale:
        np.save(directory + 'sums', self.sums)
        np.save(directory + 'sumsquares', self.sumsqrs)
        np.save(directory + 'sumtime', self.sumtime)

    np.save(directory + 'timestep', np.array([self.timestep]))
    np.save(directory + 'train_scores', np.array(self.train_scores))
    np.save(directory + 'test_scores', np.array(self.test_scores))
    print("Agent successfully saved in folder {}".format(directory))

def save(self, name):
    directory = 'saves/' + name + '/'
    if not os.path.exists(directory):
        os.makedirs(directory)
    directory += 'iteration_{}'.format(self.timestep) + '/'
    if not os.path.exists(directory):
        os.makedirs(directory)

    for i, w in enumerate(tf.global_variables()):
        np.save(directory + 'weight_{}'.format(i), self.sess.run(w))

    if self.scale != 'off':
        np.save(directory + 'sums', self.sums)
        np.save(directory + 'sumsquares', self.sumsqrs)
        np.save(directory + 'sumtime', self.sumtime)

    np.save(directory + 'timestep', np.array([self.timestep]))
    np.save(directory + 'train_scores', np.array(self.train_scores))
    np.save(directory + 'test_scores', np.array(self.test_scores))
    print("Agent successfully saved in folder {}".format(directory))

def save(self, name):
    directory = 'saves/' + name + '/'
    if not os.path.exists(directory):
        os.makedirs(directory)
    directory += 'iteration_{}'.format(self.timestep) + '/'
    if not os.path.exists(directory):
        os.makedirs(directory)

    for i, tensor in enumerate(tf.global_variables()):
        value = self.sess.run(tensor)
        np.save(directory + 'weight_{}'.format(i), value)

    if self.scale != 'off':
        np.save(directory + 'sums', self.sums)
        np.save(directory + 'sumsquares', self.sumsqrs)
        np.save(directory + 'sumtime', self.sumtime)

    np.save(directory + 'timestep', np.array([self.timestep]))
    np.save(directory + 'train_scores', np.array(self.train_scores))
    np.save(directory + 'test_scores', np.array(self.test_scores))
    print("Agent successfully saved in folder {}".format(directory))

def load(self, name, iteration=None):
    try:
        directory = 'saves/' + name + '/'
        if not os.path.exists(directory):
            print('That directory does not exist!')
            raise Exception
        if iteration is None:
            iteration = np.max([int(x[10:])
                                for x in [dir for dir in os.walk(directory)][0][1]])
        directory += 'iteration_{}'.format(iteration) + '/'

        for i, tensor in enumerate(tf.global_variables()):
            arr = np.load(directory + 'weight_{}.npy'.format(i))
            self.sess.run(tensor.assign(arr))

        if self.scale != 'off':
            self.sums = np.load(directory + 'sums.npy')
            self.sumsqrs = np.load(directory + 'sumsquares.npy')
            self.sumtime = np.load(directory + 'sumtime.npy')

        self.timestep = np.load(directory + 'timestep.npy')[0]
        self.train_scores = np.load(directory + 'train_scores.npy').tolist()
        self.test_scores = np.load(directory + 'test_scores.npy').tolist()
        print("Agent successfully loaded from folder {}".format(directory))
    except:
        print("Something is wrong, loading failed")

def model_initilization(self, cfg):
    ########################################################################

    def initialization():
        var_list = tf.global_variables()
        for var in var_list:
            self.sess.run(tf.variables_initializer([var]),
                          feed_dict={self.z: self.sample_z[:cfg.iBatchSize],
                                     self.images_lab: self.sample_images[:cfg.iBatchSize],
                                     self.fInputNoise: cfg.fInputNoise})
            print(var.op.name)
        # self.sess.run(tf.initialize_all_tables(),
        #               feed_dict={self.z: self.sample_z[:cfg.iBatchSize],
        #                          self.images_lab: self.sample_images[:cfg.iBatchSize],
        #                          self.fInputNoise: cfg.fInputNoiseBiG})
        print('optimizor initialization')

    if cfg.bLoadCheckpoint:
        if self.load(cfg):
            print(" [*] Load SUCCESS")
        else:
            print(" [!] Load failed...")
            initialization()
    else:
        initialization()

def main():
    """Create the model and start the training."""
    args = get_arguments()

    # Default image.
    image_batch = tf.constant(0, tf.float32, shape=[1, 321, 321, 3])

    # Create network.
    net = DeepLabResNetModel({'data': image_batch})
    var_list = tf.global_variables()

    # Set up tf session and initialize variables.
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        init = tf.global_variables_initializer()
        sess.run(init)

        # Loading .npy weights.
        net.load(args.npy_path, sess)

        # Saver for converting the loaded weights into .ckpt.
        saver = tf.train.Saver(var_list=var_list, write_version=1)
        save(saver, sess, args.save_dir)

def build_matchnet(self):
    self.sentence_fc2 = self.sentencenet(self.tfidf_feat, reuse=False)
    # self.sentence_fc2 = self.sentence_concat(self.tfidf_feat, self.lda_feat, reuse=False)
    self.image_fc2 = self.imagenet(self.image_feat, skip=self.is_skip, reuse=False)
    # compute loss
    if self.is_training:
        # triplet loss
        # sentence_fc2_neg = self.sentencenet(self.sentence_feat_neg, reuse=True)
        # image_fc2_neg = self.imagenet(self.image_feat_neg, skip=self.is_skip, reuse=True)
        # self.image_center_triplet_loss = self.triplet_loss(self.image_fc2, self.sentence_fc2, sentence_fc2_neg)
        # self.sentence_center_triplet_loss = self.triplet_loss(self.sentence_fc2, self.image_fc2, image_fc2_neg)

        # top k triplet loss
        self.sentence_center_triplet_loss, self.image_center_triplet_loss = self.top_K_loss(
            self.sentence_fc2, self.image_fc2)
        self.reg_loss = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        # reg loss and total loss
        self.total_loss = tf.add_n([self.image_center_triplet_loss,
                                    self.sentence_center_triplet_loss] + self.reg_loss)

    self.saver = tf.train.Saver(max_to_keep=30)
    self.t_var = tf.trainable_variables()
    self.g_var = tf.global_variables()
    self.img_var = [var for var in self.t_var if 'image' in var.name]

def restore_inception_resnet_variables_from_weight(sess, weights_path):

    adam_vars = [var for var in tf.global_variables()
                 if 'Adam' in var.name or
                 'beta1_power' in var.name or
                 'beta2_power' in var.name]
    uninit_vars = tf.get_collection(
        tf.GraphKeys.GLOBAL_VARIABLES,
        scope='InceptionResnetV2/Conv2d_1a_3x3') + adam_vars
    init_op = tf.variables_initializer(uninit_vars)
    variables_to_restore = slim.get_variables_to_restore(
        exclude=['InceptionResnetV2/Conv2d_1a_3x3'])
    for var in uninit_vars:
        if var in variables_to_restore:
            variables_to_restore.remove(var)
    saver = tf.train.Saver(variables_to_restore)
    print 'Initializing new variables to train from downloaded inception resnet weights'
    sess.run(init_op)
    saver.restore(sess, weights_path)
    return 0

def train(self, train_X, train_Y, learning_rate, training_epochs,
          model_output_dir=None):
    n_samples = train_X.shape[0]

    # Mean squared error
    cost = tf.reduce_sum(tf.pow(self.model - self.vars['Y'], 2)) / (2 * n_samples)

    # Gradient descent
    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

    # Launch the graph
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver(tf.global_variables())

        # Fit all training data
        for epoch in range(training_epochs):
            for x, y in zip(train_X, train_Y):
                sess.run(optimizer, feed_dict={self.vars['X']: x,
                                               self.vars['Y']: y})

        # Save model locally
        saver.save(sess, model_output_dir + 'model.ckpt')
    return

def print_all_variables(train_only=False):
    """Print all trainable and non-trainable variables
    without tl.layers.initialize_global_variables(sess)

    Parameters
    ----------
    train_only : boolean
        If True, only print the trainable variables, otherwise, print all
        variables.
    """
    if train_only:
        t_vars = tf.trainable_variables()
        print("  [*] printing trainable variables")
    else:
        try:    # TF1.0
            t_vars = tf.global_variables()
        except:  # TF0.12
            t_vars = tf.all_variables()
        print("  [*] printing global variables")
    for idx, v in enumerate(t_vars):
        print("  var {:3}: {:15}   {}".format(idx, str(v.get_shape()), v.name))

def get_variables_with_name(name, train_only=True, printable=False):
    """Get variable list by a given name scope.

    >>> dense_vars = tl.layers.get_variables_with_name('dense', True, True)
    """
    print("  [*] getting variables with %s" % name)
    # tvar = tf.trainable_variables() if train_only else tf.all_variables()
    if train_only:
        t_vars = tf.trainable_variables()
    else:
        try:    # TF1.0
            t_vars = tf.global_variables()
        except:  # TF0.12
            t_vars = tf.all_variables()

    d_vars = [var for var in t_vars if name in var.name]
    if printable:
        for idx, v in enumerate(d_vars):
            print("  got {:3}: {:15}   {}".format(idx, v.name, str(v.get_shape())))
    return d_vars

def build_discriminator(x_data, x_generated, keep_prob):
    x_data = tf.unstack(x_data, seq_size, 1)
    x_generated = list(x_generated)
    x_in = tf.concat([x_data, x_generated], 1)
    x_in = tf.unstack(x_in, seq_size, 0)
    lstm_cell = tf.contrib.rnn.MultiRNNCell(
        [tf.contrib.rnn.DropoutWrapper(tf.contrib.rnn.BasicLSTMCell(n_hidden),
                                       output_keep_prob=keep_prob)
         for _ in range(d_num_layers)])
    with tf.variable_scope("dis") as dis:
        weights = tf.Variable(tf.random_normal([n_hidden, 1]))
        biases = tf.Variable(tf.random_normal([1]))
        outputs, states = tf.contrib.rnn.static_rnn(lstm_cell, x_in,
                                                    dtype=tf.float32)
        res = tf.matmul(outputs[-1], weights) + biases
        y_data = tf.nn.sigmoid(tf.slice(res, [0, 0], [batch_size, -1], name=None))
        y_generated = tf.nn.sigmoid(tf.slice(res, [batch_size, 0], [-1, -1], name=None))
        d_params = [v for v in tf.global_variables() if v.name.startswith(dis.name)]
    with tf.name_scope("desc_params"):
        for param in d_params:
            variable_summaries(param)
    return y_data, y_generated, d_params

def initialize():
    new_variables = set(tf.global_variables()) - ALREADY_INITIALIZED
    get_session().run(tf.variables_initializer(new_variables))
    ALREADY_INITIALIZED.update(new_variables)

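The helper above relies on two module-level names that the excerpt does not show. A sketch of what they look like, as an assumption about the surrounding module (the original project may construct and configure the session differently):

# Assumed module-level companions of initialize() above.
ALREADY_INITIALIZED = set()  # variables initialized by earlier initialize() calls

def get_session():
    # Return the session currently installed as default (a sketch).
    return tf.get_default_session()
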
def savable_variables(self):
    """Returns a list/dict of savable variables to pass to tf.train.Saver."""
    return tf.global_variables()

def get_post_init_ops(self):
    # Copy initialized values for variables on GPU 0 to other GPUs.
    global_vars = tf.global_variables()
    var_by_name = dict([(v.name, v) for v in global_vars])
    post_init_ops = []
    for v in global_vars:
        split_name = v.name.split('/')
        # TODO(b/62630508): use more specific prefix than v or v0.
        if split_name[0] == 'v0' or not v.name.startswith('v'):
            continue
        split_name[0] = 'v0'
        copy_from = var_by_name['/'.join(split_name)]
        post_init_ops.append(v.assign(copy_from.read_value()))
    return post_init_ops

def savable_variables(self):
    """Return the set of variables used for saving/loading the model."""
    params = []
    for v in tf.global_variables():
        split_name = v.name.split('/')
        if split_name[0] == 'v0' or not v.name.startswith('v'):
            params.append(v)
    return params

def get_post_init_ops(self):
    """Copy initialized values for variables to other devices."""
    global_vars = tf.global_variables()
    var_by_name = dict([(v.name, v) for v in global_vars])
    post_init_ops = []
    for v in global_vars:
        split_name = v.name.split('/')
        # TODO(b/62630508): use more specific prefix than v or v0.
        if split_name[0] == 'v0' or not v.name.startswith('v'):
            continue
        split_name[0] = 'v0'
        copy_from = var_by_name['/'.join(split_name)]
        post_init_ops.append(v.assign(copy_from.read_value()))
    return post_init_ops

def initialize(self, sess):
    # Initial file lists are empty
    np_paths = []
    ss_paths = []
    # Fresh train directly from ImageNet weights
    print('Loading initial model weights from {:s}'.format(self.pretrained_model))
    variables = tf.global_variables()
    # Initialize all variables first
    sess.run(tf.variables_initializer(variables, name='init'))
    var_keep_dic = self.get_variables_in_checkpoint_file(self.pretrained_model)
    # Get the variables to restore, ignoring the variables to fix
    variables_to_restore = self.net.get_variables_to_restore(variables, var_keep_dic)

    restorer = tf.train.Saver(variables_to_restore)
    restorer.restore(sess, self.pretrained_model)
    print('Loaded.')
    # Need to fix the variables before loading, so that the RGB weights are
    # changed to BGR. For VGG16 it also changes the convolutional weights fc6
    # and fc7 to fully connected weights
    self.net.fix_variables(sess, self.pretrained_model)
    print('Fixed.')
    last_snapshot_iter = 0
    rate = cfg.TRAIN.LEARNING_RATE
    stepsizes = list(cfg.TRAIN.STEPSIZE)

    return rate, last_snapshot_iter, stepsizes, np_paths, ss_paths

def vars(self):
    return [var for var in tf.global_variables() if self.name in var.name]