The following 50 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.reset_default_graph().
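Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below, and assuming the TensorFlow 1.x API) of the basic pattern: tf.reset_default_graph() empties the default graph, so a model can be rebuilt from scratch in the same Python process, for example between unit tests or training runs, without ops accumulating or variable names colliding. It should not be called while a Session created on the old graph is still in use.

import tensorflow as tf

def build_and_run(scale):
    # Start from an empty default graph so repeated calls in the same
    # process do not accumulate ops or collide on variable names.
    tf.reset_default_graph()
    x = tf.placeholder(tf.float32, shape=(), name="x")
    w = tf.Variable(scale, dtype=tf.float32, name="scale")
    y = w * x
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        return sess.run(y, feed_dict={x: 3.0})

print(build_and_run(2.0))  # 6.0
print(build_and_run(4.0))  # 12.0 -- rebuilding succeeds because the graph was reset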
def __init__(self, channels=3, n_class=2, cost="cross_entropy", cost_kwargs={}, **kwargs):
    tf.reset_default_graph()
    self.n_class = n_class
    self.summaries = kwargs.get("summaries", True)
    self.x = tf.placeholder("float", shape=[None, None, None, channels])
    self.y = tf.placeholder("float", shape=[None, None, None, n_class])
    self.keep_prob = tf.placeholder(tf.float32)  # dropout (keep probability)
    logits, self.variables, self.offset = create_conv_net(self.x, self.keep_prob, channels, n_class, **kwargs)
    self.cost = self._get_cost(logits, cost, cost_kwargs)
    self.gradients_node = tf.gradients(self.cost, self.variables)
    self.cross_entropy = tf.reduce_mean(cross_entropy(tf.reshape(self.y, [-1, n_class]),
                                                      tf.reshape(pixel_wise_softmax_2(logits), [-1, n_class])))
    self.predicter = pixel_wise_softmax_2(logits)
    self.correct_pred = tf.equal(tf.argmax(self.predicter, 3), tf.argmax(self.y, 3))
    self.accuracy = tf.reduce_mean(tf.cast(self.correct_pred, tf.float32))
def retrain():
    print 'Start retraining'
    tf.reset_default_graph()
    policy_network = PolicyNetwork(scope='supervised_policy')

    f = open(relationPath)
    training_pairs = f.readlines()
    f.close()

    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, 'models/policy_supervised_' + relation)
        print "sl_policy restored"
        episodes = len(training_pairs)
        if episodes > 300:
            episodes = 300
        REINFORCE(training_pairs, policy_network, episodes)
        saver.save(sess, 'models/policy_retrained' + relation)
    print 'Retrained model saved'
def show_shrinkage(shrink_func, theta, **kwargs):
    tf.reset_default_graph()
    tf.set_random_seed(kwargs.get('seed', 1))

    N = kwargs.get('N', 500)
    L = kwargs.get('L', 4)
    nsigmas = kwargs.get('sigmas', 10)
    shape = (N, L)
    rvar = 1e-4
    r = np.reshape(np.linspace(0, nsigmas, N * L) * math.sqrt(rvar), shape)
    r_ = tfcf(r)
    rvar_ = tfcf(np.ones(L) * rvar)

    xhat_, dxdr_ = shrink_func(r_, rvar_, tfcf(theta))

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        xhat = sess.run(xhat_)

    import matplotlib.pyplot as plt
    plt.figure(1)
    plt.plot(r.reshape(-1), r.reshape(-1), 'y')
    plt.plot(r.reshape(-1), xhat.reshape(-1), 'b')
    if kwargs.has_key('title'):
        plt.suptitle(kwargs['title'])
    plt.show()
def test_multikwargs():
    tf.reset_default_graph()
    x = tf.placeholder(tf.int32, (), name="x")
    with tf.variable_scope("other"):
        x2 = tf.placeholder(tf.int32, (), name="x")
    z = 3 * x + 2 * x2

    lin = function([x, x2], z, givens={x2: 0})
    with single_threaded_session():
        initialize()
        assert lin(2) == 6
        assert lin(2, 2) == 10
        expt_caught = False
        try:
            lin(x=2)
        except AssertionError:
            expt_caught = True
        assert expt_caught
def testUnknownImageShape(self):
    tf.reset_default_graph()
    batch_size = 2
    height, width = 224, 224
    num_classes = 1000
    input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
    with self.test_session() as sess:
        inputs = tf.placeholder(tf.float32, shape=(batch_size, None, None, 3))
        logits, end_points = inception.inception_v2(inputs, num_classes)
        self.assertTrue(logits.op.name.startswith('InceptionV2/Logits'))
        self.assertListEqual(logits.get_shape().as_list(), [batch_size, num_classes])
        pre_pool = end_points['Mixed_5c']
        feed_dict = {inputs: input_np}
        tf.global_variables_initializer().run()
        pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
        self.assertListEqual(list(pre_pool_out.shape), [batch_size, 7, 7, 1024])
def testUnknownImageShape(self):
    tf.reset_default_graph()
    batch_size = 2
    height, width = 224, 224
    num_classes = 1000
    input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
    with self.test_session() as sess:
        inputs = tf.placeholder(tf.float32, shape=(batch_size, None, None, 3))
        logits, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes)
        self.assertTrue(logits.op.name.startswith('MobilenetV1/Logits'))
        self.assertListEqual(logits.get_shape().as_list(), [batch_size, num_classes])
        pre_pool = end_points['Conv2d_13_pointwise']
        feed_dict = {inputs: input_np}
        tf.global_variables_initializer().run()
        pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
        self.assertListEqual(list(pre_pool_out.shape), [batch_size, 7, 7, 1024])
def testUnknownImageShape(self):
    tf.reset_default_graph()
    batch_size = 2
    height, width = 299, 299
    num_classes = 1000
    input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
    with self.test_session() as sess:
        inputs = tf.placeholder(tf.float32, shape=(batch_size, None, None, 3))
        logits, end_points = inception.inception_v3(inputs, num_classes)
        self.assertListEqual(logits.get_shape().as_list(), [batch_size, num_classes])
        pre_pool = end_points['Mixed_7c']
        feed_dict = {inputs: input_np}
        tf.global_variables_initializer().run()
        pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
        self.assertListEqual(list(pre_pool_out.shape), [batch_size, 8, 8, 2048])
def testUnknownImageShape(self):
    tf.reset_default_graph()
    batch_size = 2
    height, width = 224, 224
    num_classes = 1000
    input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
    with self.test_session() as sess:
        inputs = tf.placeholder(tf.float32, shape=(batch_size, None, None, 3))
        logits, end_points = inception.inception_v1(inputs, num_classes)
        self.assertTrue(logits.op.name.startswith('InceptionV1/Logits'))
        self.assertListEqual(logits.get_shape().as_list(), [batch_size, num_classes])
        pre_pool = end_points['Mixed_5c']
        feed_dict = {inputs: input_np}
        tf.global_variables_initializer().run()
        pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
        self.assertListEqual(list(pre_pool_out.shape), [batch_size, 7, 7, 1024])
def test_run_feed_dict(self):
    p = self.create_plan(loom_input_tensor=None)
    p.examples = [1] * 4
    self.check_plan(p, [])

    # Test that we don't clobber a better checkpoint with a worse one.
    tf.reset_default_graph()
    self._ClearCachedSession()
    p = self.create_plan(loom_input_tensor=None)
    p.examples = [1] * 4
    p.epochs = 1
    p._loss_total = tf.constant(42.0)

    # We aren't using a managed session, so we need to run this ourselves.
    init_op = tf.global_variables_initializer()
    sv = p.create_supervisor()
    with self.test_session() as sess:
        sess.run(init_op)
        p.run(sv, sess)
        log_str = p.print_file.getvalue()
        self.assertNotIn('new best model saved', log_str)
def test_train_predict2():
    '''
    Test that the embedding_attention model works, with saving and loading
    of weights
    '''
    import tempfile
    sp = SequencePattern()
    tempdir = tempfile.mkdtemp()
    ts2s = TFLearnSeq2Seq(sp, seq2seq_model="embedding_attention",
                          data_dir=tempdir, name="attention")
    tf.reset_default_graph()
    ts2s.train(num_epochs=1, num_points=1000, weights_output_fn=1, weights_input_fn=0)
    assert os.path.exists(ts2s.weights_output_fn)

    tf.reset_default_graph()
    ts2s = TFLearnSeq2Seq(sp, seq2seq_model="embedding_attention",
                          data_dir="DATA", name="attention", verbose=1)
    prediction, y = ts2s.predict(Xin=range(10), weights_input_fn=1)
    assert len(prediction) == 10

    os.system("rm -rf %s" % tempdir)
def test_train_predict3():
    '''
    Test that a model trained on sequences of one length can be used for
    predictions on other sequence lengths
    '''
    import tempfile
    sp = SequencePattern("sorted", in_seq_len=10, out_seq_len=10)
    tempdir = tempfile.mkdtemp()
    ts2s = TFLearnSeq2Seq(sp, seq2seq_model="embedding_attention",
                          data_dir=tempdir, name="attention")
    tf.reset_default_graph()
    ts2s.train(num_epochs=1, num_points=1000, weights_output_fn=1, weights_input_fn=0)
    assert os.path.exists(ts2s.weights_output_fn)

    tf.reset_default_graph()
    sp = SequencePattern("sorted", in_seq_len=20, out_seq_len=8)
    tf.reset_default_graph()
    ts2s = TFLearnSeq2Seq(sp, seq2seq_model="embedding_attention",
                          data_dir="DATA", name="attention", verbose=1)
    x = np.random.randint(0, 9, 20)
    prediction, y = ts2s.predict(x, weights_input_fn=1)
    assert len(prediction) == 8

    os.system("rm -rf %s" % tempdir)
def test_main3():
    '''
    Integration test - training then prediction: attention model
    '''
    import tempfile
    wfn = "tmp_weights.tfl"
    if os.path.exists(wfn):
        os.unlink(wfn)
    arglist = "-e 2 -o tmp_weights.tfl -v -v -v -v -m embedding_attention train 5000"
    arglist = arglist.split(' ')
    tf.reset_default_graph()
    ts2s = CommandLine(arglist=arglist)
    assert os.path.exists(wfn)

    arglist = "-i tmp_weights.tfl -v -v -v -v -m embedding_attention predict 1 2 3 4 5 6 7 8 9 0"
    arglist = arglist.split(' ')
    tf.reset_default_graph()
    ts2s = CommandLine(arglist=arglist)
    assert len(ts2s.prediction_results[0][0]) == 10

#-----------------------------------------------------------------------------
def testUnknownImageShape(self):
    tf.reset_default_graph()
    batch_size = 2
    height, width = 224, 224
    num_classes = 1000
    input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
    with self.test_session() as sess:
        inputs = tf.placeholder(tf.float32, shape=(batch_size, None, None, 3))
        logits, end_points = inception.inception_v2(inputs, num_classes)
        self.assertTrue(logits.op.name.startswith('InceptionV2/Logits'))
        self.assertListEqual(logits.get_shape().as_list(), [batch_size, num_classes])
        pre_pool = end_points['Mixed_5c']
        feed_dict = {inputs: input_np}
        tf.initialize_all_variables().run()
        pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
        self.assertListEqual(list(pre_pool_out.shape), [batch_size, 7, 7, 1024])
def testUnknownImageShape(self):
    tf.reset_default_graph()
    batch_size = 2
    height, width = 299, 299
    num_classes = 1000
    input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
    with self.test_session() as sess:
        inputs = tf.placeholder(tf.float32, shape=(batch_size, None, None, 3))
        logits, end_points = inception.inception_v3(inputs, num_classes)
        self.assertListEqual(logits.get_shape().as_list(), [batch_size, num_classes])
        pre_pool = end_points['Mixed_7c']
        feed_dict = {inputs: input_np}
        tf.initialize_all_variables().run()
        pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
        self.assertListEqual(list(pre_pool_out.shape), [batch_size, 8, 8, 2048])
def testUnknownImageShape(self):
    tf.reset_default_graph()
    batch_size = 2
    height, width = 224, 224
    num_classes = 1000
    input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
    with self.test_session() as sess:
        inputs = tf.placeholder(tf.float32, shape=(batch_size, None, None, 3))
        logits, end_points = inception.inception_v1(inputs, num_classes)
        self.assertTrue(logits.op.name.startswith('InceptionV1/Logits'))
        self.assertListEqual(logits.get_shape().as_list(), [batch_size, num_classes])
        pre_pool = end_points['Mixed_5c']
        feed_dict = {inputs: input_np}
        tf.initialize_all_variables().run()
        pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
        self.assertListEqual(list(pre_pool_out.shape), [batch_size, 7, 7, 1024])
def test_sgld_sparse(self):
    tf.reset_default_graph()

    z = tf.Variable(tf.zeros((5, 2)), dtype=tf.float32)
    idx = tf.placeholder(tf.int32)
    zi = tf.gather(z, idx)
    zloss = tf.square(zi - [10.0, 5.0])

    sgld = SGLD(learning_rate=0.4)
    train_op_sgld = sgld.minimize(zloss)

    sess = tf.InteractiveSession()
    sess.run(tf.global_variables_initializer())

    self.assertTrue(np.alltrue(sess.run(z) == 0.0))

    sess.run(train_op_sgld, feed_dict={idx: 3})
    zh = sess.run(z)
    self.assertTrue(np.alltrue(zh[[0, 1, 2, 4], :] == 0.0))
    self.assertTrue(zh[3, 0] > 0)
def test_psgld_sparse(self):
    tf.reset_default_graph()

    z = tf.Variable(tf.zeros((5, 2)), dtype=tf.float32)
    idx = tf.placeholder(tf.int32)
    zi = tf.gather(z, idx)
    zloss = tf.square(zi - [10.0, 5.0])

    psgld = pSGLD(learning_rate=0.4)
    train_op_psgld = psgld.minimize(zloss)

    sess = tf.InteractiveSession()
    sess.run(tf.global_variables_initializer())

    self.assertTrue(np.alltrue(sess.run(z) == 0.0))

    sess.run(train_op_psgld, feed_dict={idx: 3})
    zh = sess.run(z)
    self.assertTrue(np.alltrue(zh[[0, 1, 2, 4], :] == 0.0))
    self.assertTrue(zh[3, 0] > 0)
def test_save_restore():
    tf.reset_default_graph()
    sess = tf.Session()
    path = '/tmp/tensor_saved_test2'
    meta_path = path + '.meta'
    r = tf.train.import_meta_graph(meta_path)
    r.restore(sess, path)
    s = _get_saver(100)
    s.save(sess, path)

    tf.reset_default_graph()
    sess2 = tf.Session()
    r2 = tf.train.import_meta_graph(meta_path)
    r2.restore(sess2, path)
    s2 = _get_saver(100)
    s2.save(sess2, path)
def get_session():
    tf.reset_default_graph()
    tf_config = tf.ConfigProto(
        inter_op_parallelism_threads=1,
        intra_op_parallelism_threads=1)
    # This was the default provided in the starter code.
    #session = tf.Session(config=tf_config)
    # Use this if I want to see what is on the GPU.
    #session = tf.Session(config=tf.ConfigProto(log_device_placement=True))
    # Use this for limiting memory allocated for the GPU.
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5)
    session = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
    print("AVAILABLE GPUS: ", get_available_gpus())
    return session
def setUp(self):
    super(RCNNProposalTest, self).setUp()
    self._num_classes = 3
    self._image_shape = (900, 1440)
    self._config = EasyDict({
        'class_max_detections': 100,
        'class_nms_threshold': 0.6,
        'total_max_detections': 300,
        'min_prob_threshold': 0.0,
    })
    self._equality_delta = 1e-03
    self._shared_model = RCNNProposal(self._num_classes, self._config)
    tf.reset_default_graph()
def setUp(self):
    super(RCNNTargetTest, self).setUp()
    # We don't care about the class labels or the batch number in most of
    # these tests.
    self._num_classes = 5
    self._placeholder_label = 3.
    self._config = EasyDict({
        'foreground_threshold': 0.5,
        'background_threshold_high': 0.5,
        'background_threshold_low': 0.1,
        'foreground_fraction': 0.5,
        'minibatch_size': 2,
    })
    # We check for a difference smaller than this number in our tests
    # instead of checking for exact equality.
    self._equality_delta = 1e-03
    self._shared_model = RCNNTarget(
        self._num_classes, self._config, seed=0
    )
    tf.reset_default_graph()
def setUp(self):
    self.base_config = EasyDict({
        'dataset': {
            'dir': '',
            'split': 'train',
            'image_preprocessing': {
                'min_size': 600,
                'max_size': 1024,
            },
            'data_augmentation': {},
        },
        'train': {
            'num_epochs': 1,
            'batch_size': 1,
            'random_shuffle': False,
            'seed': None,
        }
    })
    tf.reset_default_graph()
def build_graph(self):
    """ Builds the graph """
    # Clear the graph
    tf.reset_default_graph()

    # Create the placeholder for the input
    nx = self.enc_dim[0]
    self.x = tf.placeholder("float", shape=[None, nx], name='x')

    # Build the various components
    if self.mode == 'train':
        self.build_enc()
        self.build_dec()
        self.build_loss_fn()

    # Add the summary op
    self.summary_op = tf.summary.merge_all()

    # Create a saver
    self.saver = tf.train.Saver()
def demonstrate_loading_two_instances_of_model1():
    print("=" * 60 + " Demonstrate loading weights from model1 into two instances of model1 in scopeA and scopeB")
    tf.reset_default_graph()
    with tf.variable_scope("scopeA") as scope:
        m1a = Model1()
        print("-" * 40 + " Trying to load model1 weights: should fail")
        try:
            m1a.model.load("model1.tfl", weights_only=True)
        except Exception as err:
            print("Loading failed, with error as expected, because variables are in scopeA")
            print("error: %s" % str(err))
            print("-" * 40)
        print("=" * 60 + " Trying to load model1 weights: should succeed")
        m1a.model.load("model1.tfl", scope_for_restore="scopeA", verbose=True, weights_only=True)
    with tf.variable_scope("scopeB") as scope:
        m1b = Model1()
        m1b.model.load("model1.tfl", scope_for_restore="scopeB", verbose=True, weights_only=True)
    print("=" * 60 + " Successfully restored weights to two instances of model1, in different scopes")
def test_main2():
    '''
    Integration test - training then prediction
    '''
    import tempfile
    tempdir = tempfile.mkdtemp()
    arglist = "--data-dir %s -e 2 --iter-num=1 -v -v --tensorboard-verbose=1 train 5000" % tempdir
    arglist = arglist.split(' ')
    tf.reset_default_graph()
    ts2s = CommandLine(arglist=arglist)
    wfn = ts2s.weights_output_fn
    assert os.path.exists(wfn)

    arglist = "-i %s predict 1 2 3 4 5 6 7 8 9 0" % wfn
    arglist = arglist.split(' ')
    tf.reset_default_graph()
    ts2s = CommandLine(arglist=arglist)
    assert len(ts2s.prediction_results[0][0]) == 10

    os.system("rm -rf %s" % tempdir)
def set_up_model():
    tf.reset_default_graph()
    X = tf.placeholder(tf.float32, shape=[None, 784])
    y = tf.placeholder(tf.float32, shape=[None, 10])

    W_conv1 = weight_variable([5, 5, 1, 32])
    b_conv1 = bias_variable([32])
    X_image = tf.reshape(X, [-1, 28, 28, 1])
    h_conv1 = tf.nn.relu(conv2d(X_image, W_conv1) + b_conv1)
    h_pool1 = max_pool_2x2(h_conv1)

    W_conv2 = weight_variable([5, 5, 32, 64])
    b_conv2 = bias_variable([64])
    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
    h_pool2 = max_pool_2x2(h_conv2)

    W_fc1 = weight_variable([7 * 7 * 64, 1024])
    b_fc1 = bias_variable([1024])
    h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)

    W_fc2 = weight_variable([1024, 10])
    b_fc2 = bias_variable([10])
    h_fc2 = tf.nn.softmax(tf.matmul(h_fc1, W_fc2) + b_fc2)

    losses = -tf.reduce_sum(y * tf.log(h_fc2), reduction_indices=[1])
    return losses, [X, y], [W_conv1, b_conv1, W_conv2, b_conv2, W_fc1, b_fc1, W_fc2, b_fc2]
def setUp(self):
    # Set up model
    tf.reset_default_graph()
    X = tf.placeholder(tf.float32, shape=[None, 784])
    y = tf.placeholder(tf.float32, shape=[None, 10])

    W_fc1 = weight_variable([784, 1024])
    b_fc1 = bias_variable([1024])
    h_fc1 = tf.nn.relu(tf.matmul(X, W_fc1) + b_fc1)

    W_fc2 = weight_variable([1024, 10])
    b_fc2 = bias_variable([10])
    h_fc2 = tf.nn.softmax(tf.matmul(h_fc1, W_fc2) + b_fc2)

    losses = -tf.reduce_sum(y * tf.log(h_fc2), reduction_indices=[1])
    self.loss = tf.reduce_mean(losses)
    self.batch_size = tf.cast(tf.gather(tf.shape(losses), 0), tf.float32)
    self.var_list = [W_fc1, b_fc1, W_fc2, b_fc2]
    self.X = X
    self.y = y
    self.sess = tf.Session()
    self.sess.run(tf.initialize_all_variables())
    self.mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
def testModuleInfo_multiple_subgraph(self):
    # pylint: disable=not-callable
    tf.reset_default_graph()
    dumb = DumbModule(name="dumb_a")
    ph_0 = tf.placeholder(dtype=tf.float32, shape=(1, 10,))
    dumb(ph_0)
    with tf.name_scope("foo"):
        dumb(ph_0)

    def check():
        sonnet_collection = tf.get_default_graph().get_collection(
            base_info.SONNET_COLLECTION_NAME)
        self.assertEqual(len(sonnet_collection), 1)
        self.assertEqual(len(sonnet_collection[0].connected_subgraphs), 2)
        connected_subgraph_0 = sonnet_collection[0].connected_subgraphs[0]
        connected_subgraph_1 = sonnet_collection[0].connected_subgraphs[1]
        self.assertEqual(connected_subgraph_0.name_scope, "dumb_a")
        self.assertEqual(connected_subgraph_1.name_scope, "foo/dumb_a")

    check()
    _copy_default_graph()
    check()
def testModuleInfo_sparsetensor(self):
    # pylint: disable=not-callable
    tf.reset_default_graph()
    dumb = DumbModule(name="dumb_a")
    sparse_tensor = tf.SparseTensor(
        indices=tf.placeholder(dtype=tf.int64, shape=(10, 2,)),
        values=tf.placeholder(dtype=tf.float32, shape=(10,)),
        dense_shape=tf.placeholder(dtype=tf.int64, shape=(2,)))
    dumb(sparse_tensor)

    def check():
        sonnet_collection = tf.get_default_graph().get_collection(
            base_info.SONNET_COLLECTION_NAME)
        connected_subgraph = sonnet_collection[0].connected_subgraphs[0]
        self.assertIsInstance(
            connected_subgraph.inputs["inputs"], tf.SparseTensor)
        self.assertIsInstance(connected_subgraph.outputs, tf.SparseTensor)

    check()
    _copy_default_graph()
    check()
def testModuleInfo_namedtuple(self):
    # pylint: disable=not-callable
    tf.reset_default_graph()
    dumb = DumbModule(name="dumb_a")
    ph_0 = tf.placeholder(dtype=tf.float32, shape=(1, 10,))
    ph_1 = tf.placeholder(dtype=tf.float32, shape=(1, 10,))
    dumb(DumbNamedTuple(ph_0, ph_1))

    def check():
        sonnet_collection = tf.get_default_graph().get_collection(
            base_info.SONNET_COLLECTION_NAME)
        connected_subgraph = sonnet_collection[0].connected_subgraphs[0]
        self.assertTrue(
            base_info._is_namedtuple(connected_subgraph.inputs["inputs"]))
        self.assertTrue(base_info._is_namedtuple(connected_subgraph.outputs))

    check()
    _copy_default_graph()
    check()
def testModuleInfo_dict(self):
    # pylint: disable=not-callable
    tf.reset_default_graph()
    dumb = DumbModule(name="dumb_a")
    ph_0 = tf.placeholder(dtype=tf.float32, shape=(1, 10,))
    ph_1 = tf.placeholder(dtype=tf.float32, shape=(1, 10,))
    dumb({"ph_0": ph_0, "ph_1": ph_1})

    def check():
        sonnet_collection = tf.get_default_graph().get_collection(
            base_info.SONNET_COLLECTION_NAME)
        connected_subgraph = sonnet_collection[0].connected_subgraphs[0]
        self.assertIsInstance(connected_subgraph.inputs["inputs"], dict)
        self.assertIsInstance(connected_subgraph.outputs, dict)

    check()
    _copy_default_graph()
    check()
def testModuleInfo_recursion(self):
    # pylint: disable=not-callable
    tf.reset_default_graph()
    dumb = DumbModule(name="dumb_a", no_nest=True)
    ph_0 = tf.placeholder(dtype=tf.float32, shape=(1, 10,))
    val = {"one": ph_0, "self": None}
    val["self"] = val
    dumb(val)

    def check(check_type):
        sonnet_collection = tf.get_default_graph().get_collection(
            base_info.SONNET_COLLECTION_NAME)
        connected_subgraph = sonnet_collection[0].connected_subgraphs[0]
        self.assertIsInstance(connected_subgraph.inputs["inputs"]["one"],
                              tf.Tensor)
        self.assertIsInstance(
            connected_subgraph.inputs["inputs"]["self"], check_type)
        self.assertIsInstance(connected_subgraph.outputs["one"], tf.Tensor)
        self.assertIsInstance(connected_subgraph.outputs["self"], check_type)

    check(dict)
    _copy_default_graph()
    check(base_info._UnserializableObject)
def __init__(self, dataset_name, model_name, net_constructor):
    # Initialize all defaults
    self.dataset_name = dataset_name
    self.model_name = model_name
    self.num_iterations = 200
    self.iterations_per_test = 5
    self.display_iter = 5
    self.snapshot_iter = 1000000
    self.train_batch_size = 0
    self.test_batch_size = 0
    self.crop_if_possible = True
    self.debug = False
    self.starter_learning_rate = 0.1
    self.learning_rate_exp = 0.1
    self.learning_rate_step = 1000
    self.reports = {}
    self.silent = False
    self.optimizer = 'momentum'

    self.net_constructor = net_constructor
    self.net = GraphCNNNetwork()
    self.net_desc = GraphCNNNetworkDescription()
    tf.reset_default_graph()

# print_ext can be disabled through the silent flag
def _check_adam():
    for _mode in HO_MODES[:2]:
        for _model in IMPLEMENTED_MODEL_TYPES[1:2]:
            _model_kwargs = {'dims': [None, 300, 300, None]}

            tf.reset_default_graph()
            # set random seeds!!!!
            np.random.seed(1)
            tf.set_random_seed(1)

            experiment('test_with_model_' + _model, collect_data=False,
                       hyper_iterations=3, mode=_mode, epochs=3,
                       optimizer=rf.AdamOptimizer,
                       optimizer_kwargs={'lr': tf.Variable(.001, name='eta_adam')},
                       model=_model, model_kwargs=_model_kwargs,
                       set_T=100,
                       )
def _check_forward():
    w_100 = []
    for i in range(1):
        for _mode in HO_MODES[0:1]:
            for _model in IMPLEMENTED_MODEL_TYPES[0:2]:
                _model_kwargs = {}  # {'dims': [None, 300, 300, None]}

                tf.reset_default_graph()
                # set random seeds!!!!
                np.random.seed(1)
                tf.set_random_seed(1)

                results = experiment('test_with_model_' + _model, collect_data=False,
                                     hyper_iterations=10, mode=_mode, epochs=None,
                                     model=_model, model_kwargs=_model_kwargs,
                                     set_T=1000,
                                     synthetic_hypers=None,
                                     hyper_batch_size=100
                                     # optimizer=rf.GradientDescentOptimizer,
                                     # optimizer_kwargs={'lr': tf.Variable(.01, name='eta')}
                                     )
                w_100.append(results[0]['weights'])
    # rf.save_obj(w_100, 'check_forward')
    return w_100
def _check_all_methods():
    for _mode in HO_MODES[:]:
        for _model in IMPLEMENTED_MODEL_TYPES:
            # _model_kwargs = {'dims': [None, 300, 300, None]}

            tf.reset_default_graph()
            # set random seeds!!!!
            np.random.seed(1)
            tf.set_random_seed(1)

            experiment('test_with_model_' + _model, collect_data=False,
                       hyper_iterations=3, mode=_mode,
                       # epochs=3,
                       model=_model,
                       # model_kwargs=_model_kwargs,
                       set_T=100,
                       synthetic_hypers=None,
                       hyper_batch_size=100
                       # optimizer=rf.GradientDescentOptimizer,
                       # optimizer_kwargs={'lr': tf.Variable(.01, name='eta')}
                       )
def _check_cnn():
    print('END')
    for _mode in HO_MODES[2:3]:
        for _model in IMPLEMENTED_MODEL_TYPES[2:3]:
            tf.reset_default_graph()
            np.random.seed(1)
            tf.set_random_seed(1)

            _model_kwargs = {'conv_dims': [[5, 5, 1, 2], [5, 5, 2, 4], [5, 5, 4, 8]],
                             'ffnn_dims': [128, 10]}

            # noinspection PyTypeChecker
            experiment('test_with_model_' + _model, collect_data=False,
                       hyper_iterations=3, mode=_mode, epochs=2,
                       model=_model, model_kwargs=_model_kwargs,
                       set_T=100,
                       synthetic_hypers=None,
                       hyper_batch_size=100,
                       l1=None, l2=None
                       # optimizer=rf.GradientDescentOptimizer,
                       # optimizer_kwargs={'lr': tf.Variable(.01, name='eta')}
                       )
def load_neural_network(self):
    meanStdInput = pd.read_csv(self.meanStdInputPath, sep=',').set_index('Unnamed: 0').as_matrix()
    self.meanInput = np.array(meanStdInput[0])
    self.stdInput = np.array(meanStdInput[1])
    meanStdOutput = pd.read_csv(self.meanStdOutputPath, sep=',').set_index('Unnamed: 0').as_matrix()
    self.meanOutput = np.array(meanStdOutput[0])
    self.stdOutput = np.array(meanStdOutput[1])

    tf.reset_default_graph()
    with tf.Graph().as_default(), tf.Session() as self.sess:
        self.x = tf.placeholder('float32', [None, self.inputSize])    # Input Tensor
        self.y_ = tf.placeholder('float32', [None, self.outputSize])  # Output Tensor
        self.create_NN()
        self.sess.run(tf.global_variables_initializer())
        self.sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
        saver = tf.train.Saver()
        saver = tf.train.import_meta_graph(self.ANNPath + '.meta')
        saver.restore(self.sess, self.ANNPath)
        print('Artificial Neural Network from: ' + self.saveFolder + ' loaded !')
def precompute_probs_for_tag(tag, userfold):
    hps = hypers.hps_for_tag(tag, mode=hypers.Mode.inference)
    tf.logging.info('Creating model')
    dat = BasketDataset(hps, userfold)
    model = rnnmodel.RNNModel(hps, dat)
    sess = tf.InteractiveSession()
    # Load pretrained weights
    tf.logging.info('Loading weights')
    utils.load_checkpoint_for_tag(tag, sess)
    # TODO: deal with 'test mode'
    tf.logging.info('Calculating probabilities')
    probmap = get_probmap(model, sess)
    # Hack because of silly reasons.
    if userfold == 'validation_full':
        userfold = 'validation'
    common.save_pdict_for_tag(tag, probmap, userfold)
    sess.close()
    tf.reset_default_graph()
    return probmap
def main():
    # preparations
    create_checkpoints_dir()
    utils.download_train_and_test_data()
    trainset, testset = utils.load_data_sets()

    # create real input for the GAN model (its discriminator) and
    # the GAN model itself
    real_size = (32, 32, 3)
    z_size = 100
    learning_rate = 0.0003
    tf.reset_default_graph()
    input_real = tf.placeholder(tf.float32, (None, *real_size), name='input_real')
    net = GAN(input_real, z_size, learning_rate)

    # create dataset
    dataset = Dataset(trainset, testset)

    # train the model
    batch_size = 128
    epochs = 25
    _, _, _ = train(net, dataset, epochs, batch_size, z_size)
def close(self):
    tf.reset_default_graph()
    self.sess.close()