The following 50 code examples, extracted from open-source Python projects, illustrate how to use tflearn.regression().
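All of the examples share the same skeleton: build a graph with tflearn.input_data(), stack layers, attach an optimizer and loss with tflearn.regression(), and train through tflearn.DNN or tflearn.SequenceGenerator. As a point of reference, here is a minimal self-contained sketch of that pattern (the XOR-style toy data is illustrative and not taken from any of the projects below):

import tflearn

# toy inputs and one-hot XOR targets, purely for illustration
X = [[0., 0.], [0., 1.], [1., 0.], [1., 1.]]
Y = [[1., 0.], [0., 1.], [0., 1.], [1., 0.]]

net = tflearn.input_data(shape=[None, 2])                  # batch of 2-feature rows
net = tflearn.fully_connected(net, 16, activation='tanh')
net = tflearn.fully_connected(net, 2, activation='softmax')
net = tflearn.regression(net, optimizer='sgd', learning_rate=2.,
                         loss='categorical_crossentropy')  # attaches the training op
model = tflearn.DNN(net)
model.fit(X, Y, n_epoch=500, snapshot_epoch=False)
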
def buildModel(layers, hidden_nodes, maxlen, char_idx, dropout=False):
    g = tflearn.input_data([None, maxlen, len(char_idx)])
    for n in range(layers - 1):
        g = tflearn.lstm(g, hidden_nodes, return_seq=True)
        if dropout:
            g = tflearn.dropout(g, 0.5)
    g = tflearn.lstm(g, hidden_nodes)
    if dropout:
        g = tflearn.dropout(g, 0.5)
    g = tflearn.fully_connected(g, len(char_idx), activation='softmax')
    g = tflearn.regression(g, optimizer='adam', loss='categorical_crossentropy',
                           learning_rate=0.001)
    return g

# inputs:
#   data - textfile
# outputs:
#   model - a TFLearn model file
#   dictionary - char_idx pickle
# params:
#   history - max length of sequence to feed into neural net
#   layers - number of hidden layers of the network
#   epochs - how many epochs to run
#   hidden_nodes - how many nodes per hidden layer

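The trailing comments describe a training routine that pairs with buildModel but is not included in this snippet. The sketch below shows one plausible shape for it, assuming tflearn's textfile_to_semi_redundant_sequences helper; the function name CharacterLSTM_train and its body are hypothetical, not the project's code:

import pickle
import tflearn
from tflearn.data_utils import textfile_to_semi_redundant_sequences

def CharacterLSTM_train(data, model, dictionary, history=25, layers=3,
                        epochs=10, hidden_nodes=512, dropout=False):
    # build X/Y sequences and the char->index dictionary from the textfile
    X, Y, char_idx = textfile_to_semi_redundant_sequences(data, seq_maxlen=history)
    pickle.dump(char_idx, open(dictionary, 'wb'))   # save the char_idx pickle
    g = buildModel(layers, hidden_nodes, history, char_idx, dropout)
    m = tflearn.SequenceGenerator(g, dictionary=char_idx, seq_maxlen=history,
                                  clip_gradients=5.0)
    m.fit(X, Y, n_epoch=epochs, show_metric=True)
    m.save(model)                                   # a TFLearn model file
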
def convolve_me(self, hyp, pd):
    network = input_data(shape=[None, pd.max_sequence], name='input')
    network = tflearn.embedding(network, input_dim=pd.vocab_size,
                                output_dim=pd.emb_size, name="embedding")
    branch1 = conv_1d(network, 128, 3, padding='valid', activation='relu', regularizer="L2")
    branch2 = conv_1d(network, 128, 4, padding='valid', activation='relu', regularizer="L2")
    branch3 = conv_1d(network, 128, 5, padding='valid', activation='relu', regularizer="L2")
    network = merge([branch1, branch2, branch3], mode='concat', axis=1)
    network = tf.expand_dims(network, 2)
    network = global_max_pool(network)
    network = dropout(network, 0.5)
    network = fully_connected(network, 2, activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.001,
                         loss='categorical_crossentropy', name='target')
    return network

def build(embedding_size=(400000, 50), train_embedding=False, hidden_dims=128,
          learning_rate=0.001):
    net = tflearn.input_data([None, 200])
    net = tflearn.embedding(net, input_dim=embedding_size[0],
                            output_dim=embedding_size[1],
                            trainable=train_embedding, name='EmbeddingLayer')
    net = tflearn.lstm(net, hidden_dims, return_seq=True)
    net = tflearn.dropout(net, 0.5)
    net = tflearn.lstm(net, hidden_dims, return_seq=True)
    net = tflearn.dropout(net, 0.5)
    net = tflearn.lstm(net, hidden_dims, return_seq=True)
    net = tflearn.dropout(net, 0.5)
    net = tflearn.lstm(net, hidden_dims)
    net = tflearn.dropout(net, 0.5)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='adam', learning_rate=learning_rate,
                             loss='categorical_crossentropy')
    return net

def sentnet_LSTM_gray(width, height, frame_count, lr, output=9):
    network = input_data(shape=[None, width, height], name='input')
    #network = tflearn.input_data(shape=[None, 28, 28], name='input')
    network = tflearn.lstm(network, 128, return_seq=True)
    network = tflearn.lstm(network, 128)
    # use the `output` parameter (the original hard-coded 9, its default value)
    network = tflearn.fully_connected(network, output, activation='softmax')
    network = tflearn.regression(network, optimizer='adam',
                                 loss='categorical_crossentropy', name="output1")
    model = tflearn.DNN(network, checkpoint_path='model_lstm',
                        max_checkpoints=1, tensorboard_verbose=0,
                        tensorboard_dir='log')
    return model

def basic_pony(self, hyp, pd):
    net = tflearn.input_data([None, pd.max_sequence], dtype=tf.float32)
    net = tflearn.embedding(net, input_dim=pd.vocab_size, output_dim=pd.emb_size,
                            name="embedding")
    net = tflearn.lstm(net, 32, dynamic=False, name="lstm")
    net = tflearn.fully_connected(net, 2, activation='softmax', name="output",
                                  restore=True)
    net = tflearn.regression(net, optimizer='adam',
                             learning_rate=hyp.regression.learning_rate,
                             loss='categorical_crossentropy')
    return net

def little_pony(self, hyp, pd):
    net = tflearn.input_data([None, pd.max_sequence], dtype=tf.float32)
    net = tflearn.embedding(net, input_dim=pd.vocab_size, output_dim=pd.emb_size,
                            name="embedding")
    net = tflearn.lstm(net, 256, dynamic=True, name="lstm")
    net = tflearn.fully_connected(net, 2, activation='softmax', name="output",
                                  restore=True)
    net = tflearn.regression(net, optimizer='adam', learning_rate=0.01,
                             loss='categorical_crossentropy')
    return net

def little_gru(self, hyp, pd):
    net = tflearn.input_data([None, pd.max_sequence], dtype=tf.float32)
    net = tflearn.embedding(net, input_dim=pd.vocab_size, output_dim=pd.emb_size,
                            name="embedding")
    net = tflearn.gru(net, 256, dynamic=True, name="gru")
    net = tflearn.fully_connected(net, 2, activation='softmax', name="output",
                                  restore=True)
    net = tflearn.regression(net, optimizer='adam',
                             learning_rate=hyp.regression.learning_rate,
                             loss='categorical_crossentropy')
    return net

def __init__(self):
    inputs = tflearn.input_data(shape=[None, 784], name="input")
    with tf.variable_scope("scope1") as scope:
        net_conv = Model1.make_core_network(inputs)   # shape (?, 10)
    with tf.variable_scope("scope2") as scope:
        net_dnn = Model2.make_core_network(inputs)    # shape (?, 10)
    network = tf.concat([net_conv, net_dnn], 1, name="concat")  # shape (?, 20)
    network = tflearn.fully_connected(network, 10, activation="softmax")
    network = regression(network, optimizer='adam', learning_rate=0.01,
                         loss='categorical_crossentropy', name='target')
    self.model = tflearn.DNN(network, tensorboard_verbose=0)

def test_feed_dict_no_None(self):
    X = [[0., 0., 0., 0.], [1., 1., 1., 1.], [0., 0., 1., 0.], [1., 1., 1., 0.]]
    Y = [[1., 0.], [0., 1.], [1., 0.], [0., 1.]]
    with tf.Graph().as_default():
        g = tflearn.input_data(shape=[None, 4], name="X_in")
        g = tflearn.reshape(g, new_shape=[-1, 2, 2, 1])
        g = tflearn.conv_2d(g, 4, 2)
        g = tflearn.conv_2d(g, 4, 1)
        g = tflearn.max_pool_2d(g, 2)
        g = tflearn.fully_connected(g, 2, activation='softmax')
        g = tflearn.regression(g, optimizer='sgd', learning_rate=1.)

        m = tflearn.DNN(g)

        def do_fit():
            m.fit({"X_in": X, 'non_existent': X}, Y, n_epoch=30, snapshot_epoch=False)
        self.assertRaisesRegexp(Exception, "Feed dict asks for variable named 'non_existent' but no such variable is known to exist", do_fit)

def build_simple_model(self):
    """Build a simple model for test

    Returns:
        DNN, [ (input layer name, input placeholder, input data) ], Target data
    """
    inputPlaceholder1, inputPlaceholder2 = \
        tf.placeholder(tf.float32, (1, 1), name="input1"), \
        tf.placeholder(tf.float32, (1, 1), name="input2")
    input1 = tflearn.input_data(placeholder=inputPlaceholder1)
    input2 = tflearn.input_data(placeholder=inputPlaceholder2)
    network = tflearn.merge([input1, input2], "sum")
    network = tflearn.reshape(network, (1, 1))
    network = tflearn.fully_connected(network, 1)
    network = tflearn.regression(network)
    return (
        tflearn.DNN(network),
        [("input1:0", inputPlaceholder1, self.INPUT_DATA_1),
         ("input2:0", inputPlaceholder2, self.INPUT_DATA_2)],
        self.TARGET,
    )

def make_network(look_back, batch_size):
    """Declare the layer types and sizes"""
    # create deep neural network with LSTM and fully connected layers
    net = tfl.input_data(shape=[None, look_back, 1], name='input')
    net = tfl.lstm(net, 32, activation='tanh', weights_init='xavier', name='LSTM1')
    net = tfl.fully_connected(net, 20, activation='relu', name='FC1')
    # net = tfl.dropout(net, 0.5)
    net = tfl.fully_connected(net, 40, activation='relu', name='FC2')
    # net = tfl.dropout(net, 0.5)
    net = tfl.fully_connected(net, 1, activation='linear', name='Linear')
    net = tfl.regression(net, batch_size=batch_size, optimizer='adam',
                         learning_rate=0.005, loss='mean_square', name='target')
    col = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
    for x in col:
        tf.add_to_collection(tf.GraphKeys.VARIABLES, x)
    return net

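A hedged usage sketch for make_network: the arrays X (shape [n_samples, look_back, 1]) and y (shape [n_samples, 1]) are assumed, and feeding dicts keyed by 'input'/'target' matches the layer names declared above.

net = make_network(look_back=10, batch_size=32)
model = tfl.DNN(net, tensorboard_verbose=0)
model.fit({'input': X}, {'target': y}, n_epoch=20,
          validation_set=0.1, batch_size=32)
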
def __init__(self, s_date):
    prev_bd = int(s_date[:6]) - 1
    prev_ed = int(s_date[9:15]) - 1
    if prev_bd % 100 == 0:
        prev_bd -= 98
    if prev_ed % 100 == 0:
        prev_ed -= 98
    pred_s_date = "%d01_%d01" % (prev_bd, prev_ed)
    prev_model = '../model/tflearn/lstm/%s' % pred_s_date
    self.model_dir = '../model/tflearn/lstm/%s' % s_date
    tf.reset_default_graph()
    tflearn.init_graph(gpu_memory_fraction=0.1)
    input_layer = tflearn.input_data(shape=[None, 30, 23], name='input')
    lstm1 = tflearn.lstm(input_layer, 23, dynamic=True, name='lstm1')
    dense1 = tflearn.fully_connected(lstm1, 1, name='dense1')
    output = tflearn.single_unit(dense1)
    regression = tflearn.regression(output, optimizer='adam', loss='mean_square',
                                    metric='R2', learning_rate=0.001)
    self.estimators = tflearn.DNN(regression)
    if os.path.exists('%s/model.tfl' % prev_model):
        self.estimators.load('%s/model.tfl' % prev_model)

def __init__(self):
    self.len_past = 30
    #self.s_date = "20120101_20160330"
    #self.model_dir = '../model/tflearn/reg_l3_bn/big/%s/' % self.s_date
    tf.reset_default_graph()
    tflearn.init_graph(gpu_memory_fraction=0.05)
    input_layer = tflearn.input_data(shape=[None, 690], name='input')
    dense1 = tflearn.fully_connected(input_layer, 400, name='dense1', activation='relu')
    dense1n = tflearn.batch_normalization(dense1, name='BN1')
    dense2 = tflearn.fully_connected(dense1n, 100, name='dense2', activation='relu')
    dense2n = tflearn.batch_normalization(dense2, name='BN2')
    dense3 = tflearn.fully_connected(dense2n, 1, name='dense3')
    output = tflearn.single_unit(dense3)
    regression = tflearn.regression(output, optimizer='adam', loss='mean_square',
                                    metric='R2', learning_rate=0.001)
    self.estimators = tflearn.DNN(regression)
    self.qty = {}
    self.day_last = {}
    self.currency = 100000000

def resnext(width, height, frame_count, lr, output=9, model_name='sentnet_color.model'):
    n = 5  # number of residual blocks per stage; `n` was undefined in the original snippet
    net = input_data(shape=[None, width, height, 3], name='input')
    net = tflearn.conv_2d(net, 16, 3, regularizer='L2', weight_decay=0.0001)
    net = tflearn.layers.conv.resnext_block(net, n, 16, 32)
    net = tflearn.resnext_block(net, 1, 32, 32, downsample=True)
    net = tflearn.resnext_block(net, n - 1, 32, 32)
    net = tflearn.resnext_block(net, 1, 64, 32, downsample=True)
    net = tflearn.resnext_block(net, n - 1, 64, 32)
    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    net = tflearn.global_avg_pool(net)
    # Regression
    net = tflearn.fully_connected(net, output, activation='softmax')
    opt = tflearn.Momentum(0.1, lr_decay=0.1, decay_step=32000, staircase=True)
    net = tflearn.regression(net, optimizer=opt, loss='categorical_crossentropy')
    model = tflearn.DNN(net, max_checkpoints=0, tensorboard_verbose=0,
                        tensorboard_dir='log')
    return model

def create_network():
    network = tflearn.input_data(shape=[None, num_history, fft_stored_size])
    network = tflearn.reshape(network, [-1, num_history, fft_stored_size, 1])
    network = tflearn.layers.conv.conv_2d(network, 32, 7)
    network = tflearn.layers.conv.max_pool_2d(network, 2)
    network = tflearn.layers.conv.conv_2d(network, 64, 5)
    network = tflearn.layers.conv.max_pool_2d(network, 2)
    network = tflearn.layers.conv.conv_2d(network, 128, 3)
    network = tflearn.layers.conv.max_pool_2d(network, 2)
    network = tflearn.layers.fully_connected(network, num_history * fft_stored_size * 2,
                                             activation="tanh")
    network = tflearn.layers.fully_connected(network, num_history * fft_stored_size,
                                             activation="tanh")
    network = tflearn.reshape(network, [-1, num_history, fft_stored_size])
    network = tflearn.regression(network, optimizer="adam",
                                 learning_rate=learning_rate, loss="mean_square")
    return network

def build_model(maxlen, char_idx, checkpoint_path):
    g = tflearn.input_data([None, maxlen, len(char_idx)])
    g = tflearn.lstm(g, 512, return_seq=True)
    g = tflearn.dropout(g, 0.5)
    g = tflearn.lstm(g, 512, return_seq=True)
    g = tflearn.dropout(g, 0.5)
    g = tflearn.lstm(g, 512)
    g = tflearn.dropout(g, 0.5)
    g = tflearn.fully_connected(g, len(char_idx), activation='softmax')
    g = tflearn.regression(g, optimizer='adam', loss='categorical_crossentropy',
                           learning_rate=0.001)
    return tflearn.SequenceGenerator(g, dictionary=char_idx, seq_maxlen=maxlen,
                                     clip_gradients=5.0,
                                     checkpoint_path=checkpoint_path)

def big_boy(self, hyp, pd):
    restore = True
    net = tflearn.input_data([None, pd.max_sequence], dtype=tf.float32)
    net = tflearn.embedding(net, input_dim=pd.vocab_size, output_dim=pd.emb_size,
                            name="embedding", restore=restore)
    net = tflearn.lstm(net, 512, dropout=hyp.lstm.dropout,
                       weights_init='uniform_scaling', dynamic=True,
                       name="lstm", restore=restore)
    net = tflearn.fully_connected(net, 128, activation='sigmoid', regularizer='L2',
                                  weight_decay=hyp.middle.weight_decay,
                                  weights_init='uniform_scaling',
                                  name="middle", restore=restore)
    net = tflearn.dropout(net, hyp.dropout.dropout, name="dropout")
    net = tflearn.fully_connected(net, 2, activation='softmax', regularizer='L2',
                                  weight_decay=hyp.output.weight_decay,
                                  weights_init='uniform_scaling',
                                  name="output", restore=restore)
    net = tflearn.regression(net, optimizer='adam',
                             learning_rate=hyp.regression.learning_rate,
                             loss='categorical_crossentropy')
    return net

def model_for_type(neural_net_type, tile_size, on_band_count):
    """The neural_net_type can be: one_layer_relu, one_layer_relu_conv,
    two_layer_relu_conv."""
    network = tflearn.input_data(shape=[None, tile_size, tile_size, on_band_count])

    # NN architectures mirror ch. 3 of www.cs.toronto.edu/~vmnih/docs/Mnih_Volodymyr_PhD_Thesis.pdf
    if neural_net_type == 'one_layer_relu':
        network = tflearn.fully_connected(network, 64, activation='relu')
    elif neural_net_type == 'one_layer_relu_conv':
        network = conv_2d(network, 64, 12, strides=4, activation='relu')
        network = max_pool_2d(network, 3)
    elif neural_net_type == 'two_layer_relu_conv':
        network = conv_2d(network, 64, 12, strides=4, activation='relu')
        network = max_pool_2d(network, 3)
        network = conv_2d(network, 128, 4, activation='relu')
    else:
        print("ERROR: exiting, unknown layer type for neural net")

    # classify as road or not road
    softmax = tflearn.fully_connected(network, 2, activation='softmax')

    # hyperparameters based on www.cs.toronto.edu/~vmnih/docs/Mnih_Volodymyr_PhD_Thesis.pdf
    momentum = tflearn.optimizers.Momentum(
        learning_rate=.005, momentum=0.9, lr_decay=0.0002, name='Momentum')
    net = tflearn.regression(softmax, optimizer=momentum, loss='categorical_crossentropy')
    return tflearn.DNN(net, tensorboard_verbose=0)

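A hedged usage sketch; tiles (shape [n, tile_size, tile_size, bands]) and onehot_labels (shape [n, 2]) are assumed NumPy arrays, not names from the original project.

model = model_for_type('two_layer_relu_conv', tile_size=64, on_band_count=3)
model.fit(tiles, onehot_labels, n_epoch=5, validation_set=0.1, show_metric=True)
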
def handle_speaker_rec_test_intent(self, message):
    speakers = data.get_speakers()
    number_classes = len(speakers)
    #print("speakers", speakers)

    #batch = data.wave_batch_generator(batch_size=1000, source=data.Source.DIGIT_WAVES, target=data.Target.speaker)
    #X, Y = next(batch)

    # Classification
    #tflearn.init_graph(num_cores=8, gpu_memory_fraction=0.5)
    net = tflearn.input_data(shape=[None, 8192])  # Two wave chunks
    net = tflearn.fully_connected(net, 64)
    net = tflearn.dropout(net, 0.5)
    net = tflearn.fully_connected(net, number_classes, activation='softmax')
    net = tflearn.regression(net, optimizer='adam', loss='categorical_crossentropy')
    model = tflearn.DNN(net)
    #model.fit(X, Y, n_epoch=100, show_metric=True, snapshot_step=100)

    CWD_PATH = os.path.dirname(__file__)
    path_to_model = os.path.join(CWD_PATH, 'model', 'model.tfl')
    model.load(path_to_model)

    demo_file = "8_Vicki_260.wav"
    #demo_file = "8_Bruce_260.wav"
    demo = data.load_wav_file(data.path + demo_file)
    result = model.predict([demo])
    result = data.one_hot_to_item(result, speakers)
    if result == "Vicki":
        self.speak("I am confident I'm speaking to %s" % (result))  # ~ 97% correct
    else:
        self.speak("I'm sorry I don't recognize your voice")

def wide_model(self, inputs, n_inputs):
    '''
    Model - wide, i.e. normal linear model (for logistic regression)
    '''
    network = inputs
    # use fully_connected (instead of single_unit) because fc works properly with
    # batches, whereas single_unit is 1D only
    network = tflearn.fully_connected(network, n_inputs, activation="linear",
                                      name="wide_linear", bias=False)  # x*W (no bias)
    network = tf.reduce_sum(network, 1, name="reduce_sum")  # batched sum, to produce logits
    network = tf.reshape(network, [-1, 1])  # so that accuracy is binary_accuracy
    if self.verbose:
        print("Wide model network %s" % network)
    return network

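To see why this yields one logit per row: the bias-free linear fully_connected computes x·W, and reduce_sum collapses its n_inputs outputs into a single number, so the block is equivalent to one linear unit whose weight vector is the row-sum of W. A minimal NumPy sketch of that equivalence (shapes assumed for illustration):

import numpy as np

x = np.array([[1., 2., 3.]])                 # one batch row, n_inputs = 3
W = np.random.randn(3, 3)                    # weights of the bias-free linear layer
logit = (x @ W).sum(axis=1, keepdims=True)   # fully_connected + reduce_sum + reshape
same = x @ W.sum(axis=1, keepdims=True)      # equivalent single linear unit
assert np.allclose(logit, same)
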
def __init__(self):
    network = tflearn.input_data(shape=[None, 784], name="input")
    network = self.make_core_network(network)
    network = regression(network, optimizer='adam', learning_rate=0.01,
                         loss='categorical_crossentropy', name='target')
    model = tflearn.DNN(network, tensorboard_verbose=0)
    self.model = model

def __init__(self):
    # Building deep neural network
    network = tflearn.input_data(shape=[None, 784], name="input")
    network = self.make_core_network(network)

    # Regression using SGD with learning rate decay and Top-3 accuracy
    sgd = tflearn.SGD(learning_rate=0.1, lr_decay=0.96, decay_step=1000)
    top_k = tflearn.metrics.Top_k(3)
    network = tflearn.regression(network, optimizer=sgd, metric=top_k,
                                 loss='categorical_crossentropy', name="target")
    model = tflearn.DNN(network, tensorboard_verbose=0)
    self.model = model

def test_dnn(self):
    with tf.Graph().as_default():
        X = [3.3, 4.4, 5.5, 6.71, 6.93, 4.168, 9.779, 6.182, 7.59, 2.167,
             7.042, 10.791, 5.313, 7.997, 5.654, 9.27, 3.1]
        Y = [1.7, 2.76, 2.09, 3.19, 1.694, 1.573, 3.366, 2.596, 2.53, 1.221,
             2.827, 3.465, 1.65, 2.904, 2.42, 2.94, 1.3]
        input = tflearn.input_data(shape=[None])
        linear = tflearn.single_unit(input)
        regression = tflearn.regression(linear, optimizer='sgd', loss='mean_square',
                                        metric='R2', learning_rate=0.01)
        m = tflearn.DNN(regression)
        # Testing fit and predict
        m.fit(X, Y, n_epoch=1000, show_metric=True, snapshot_epoch=False)
        res = m.predict([3.2])[0]
        self.assertGreater(res, 1.3, "DNN test (linear regression) failed! with score: " + str(res) + " expected > 1.3")
        self.assertLess(res, 1.8, "DNN test (linear regression) failed! with score: " + str(res) + " expected < 1.8")

        # Testing save method
        m.save("test_dnn.tflearn")
        self.assertTrue(os.path.exists("test_dnn.tflearn.index"))

    with tf.Graph().as_default():
        input = tflearn.input_data(shape=[None])
        linear = tflearn.single_unit(input)
        regression = tflearn.regression(linear, optimizer='sgd', loss='mean_square',
                                        metric='R2', learning_rate=0.01)
        m = tflearn.DNN(regression)
        # Testing load method
        m.load("test_dnn.tflearn")
        res = m.predict([3.2])[0]
        self.assertGreater(res, 1.3, "DNN test (linear regression) failed after loading model! score: " + str(res) + " expected > 1.3")
        self.assertLess(res, 1.8, "DNN test (linear regression) failed after loading model! score: " + str(res) + " expected < 1.8")

def test_dnn_loading_scope(self):
    with tf.Graph().as_default():
        X = [3.3, 4.4, 5.5, 6.71, 6.93, 4.168, 9.779, 6.182, 7.59, 2.167,
             7.042, 10.791, 5.313, 7.997, 5.654, 9.27, 3.1]
        Y = [1.7, 2.76, 2.09, 3.19, 1.694, 1.573, 3.366, 2.596, 2.53, 1.221,
             2.827, 3.465, 1.65, 2.904, 2.42, 2.94, 1.3]
        input = tflearn.input_data(shape=[None])
        linear = tflearn.single_unit(input)
        regression = tflearn.regression(linear, optimizer='sgd', loss='mean_square',
                                        metric='R2', learning_rate=0.01)
        m = tflearn.DNN(regression)
        # Testing fit and predict
        m.fit(X, Y, n_epoch=1000, show_metric=True, snapshot_epoch=False)
        res = m.predict([3.2])[0]
        self.assertGreater(res, 1.3, "DNN test (linear regression) failed! with score: " + str(res) + " expected > 1.3")
        self.assertLess(res, 1.8, "DNN test (linear regression) failed! with score: " + str(res) + " expected < 1.8")

        # Testing save method
        m.save("test_dnn.tflearn")
        self.assertTrue(os.path.exists("test_dnn.tflearn.index"))

    # Testing loading, with change of variable scope (saved with no scope, now loading into scopeA)
    with tf.Graph().as_default():  # start with clear graph
        with tf.variable_scope("scopeA") as scope:
            input = tflearn.input_data(shape=[None])
            linear = tflearn.single_unit(input)
            regression = tflearn.regression(linear, optimizer='sgd', loss='mean_square',
                                            metric='R2', learning_rate=0.01)
            m = tflearn.DNN(regression)

            def try_load():
                m.load("test_dnn.tflearn")
            self.assertRaises(tf.errors.NotFoundError, try_load)  # fails, since names in file don't have "scopeA"

            m.load("test_dnn.tflearn", variable_name_map=("scopeA/", ""))  # succeeds, because variable names are rewritten
            res = m.predict([3.2])[0]
            self.assertGreater(res, 1.3, "DNN test (linear regression) failed after loading model! score: " + str(res) + " expected > 1.3")
            self.assertLess(res, 1.8, "DNN test (linear regression) failed after loading model! score: " + str(res) + " expected < 1.8")

def test_conv_layers(self):
    X = [[0., 0., 0., 0.], [1., 1., 1., 1.], [0., 0., 1., 0.], [1., 1., 1., 0.]]
    Y = [[1., 0.], [0., 1.], [1., 0.], [0., 1.]]
    with tf.Graph().as_default():
        g = tflearn.input_data(shape=[None, 4])
        g = tflearn.reshape(g, new_shape=[-1, 2, 2, 1])
        g = tflearn.conv_2d(g, 4, 2, activation='relu')
        g = tflearn.max_pool_2d(g, 2)
        g = tflearn.fully_connected(g, 2, activation='softmax')
        g = tflearn.regression(g, optimizer='sgd', learning_rate=1.)

        m = tflearn.DNN(g)
        m.fit(X, Y, n_epoch=100, snapshot_epoch=False)
        # TODO: Fix test
        #self.assertGreater(m.predict([[1., 0., 0., 0.]])[0][0], 0.5)

    # Bulk Tests
    with tf.Graph().as_default():
        g = tflearn.input_data(shape=[None, 4])
        g = tflearn.reshape(g, new_shape=[-1, 2, 2, 1])
        g = tflearn.conv_2d(g, 4, 2)
        g = tflearn.conv_2d(g, 4, 1)
        g = tflearn.conv_2d_transpose(g, 4, 2, [2, 2])
        g = tflearn.max_pool_2d(g, 2)

def test_recurrent_layers(self):
    X = [[1, 3, 5, 7], [2, 4, 8, 10], [1, 5, 9, 11], [2, 6, 8, 0]]
    Y = [[0., 1.], [1., 0.], [0., 1.], [1., 0.]]
    with tf.Graph().as_default():
        g = tflearn.input_data(shape=[None, 4])
        g = tflearn.embedding(g, input_dim=12, output_dim=4)
        g = tflearn.lstm(g, 6)
        g = tflearn.fully_connected(g, 2, activation='softmax')
        g = tflearn.regression(g, optimizer='sgd', learning_rate=1.)

        m = tflearn.DNN(g)
        m.fit(X, Y, n_epoch=300, snapshot_epoch=False)
        self.assertGreater(m.predict([[5, 9, 11, 1]])[0][1], 0.9)

def CharacterLSTM_Run(seed, dictionary, model, output, steps=600, layers=3,
                      hidden_nodes=512, history=25, temperature=0.5, dropout=False):
    char_idx_file = dictionary
    maxlen = history
    char_idx = None

    if os.path.isfile(char_idx_file):
        print('Loading previous char_idx')
        char_idx = pickle.load(open(char_idx_file, 'rb'))

    tf.reset_default_graph()
    g = buildModel(layers, hidden_nodes, maxlen, char_idx, dropout)
    '''
    g = tflearn.input_data([None, maxlen, len(char_idx)])
    for n in range(layers-1):
        g = tflearn.lstm(g, hidden_nodes, return_seq=True)
        if dropout:
            g = tflearn.dropout(g, 0.5)
    g = tflearn.lstm(g, hidden_nodes)
    if dropout:
        g = tflearn.dropout(g, 0.5)
    g = tflearn.fully_connected(g, len(char_idx), activation='softmax')
    g = tflearn.regression(g, optimizer='adam', loss='categorical_crossentropy',
                           learning_rate=0.001)
    '''
    m = tflearn.SequenceGenerator(g, dictionary=char_idx, seq_maxlen=maxlen,
                                  clip_gradients=5.0)  #, checkpoint_path='model_history_gen')
    m.load(model)

    #seed = random_sequence_from_textfile(data, maxlen)
    print('seed=' + seed)
    print('len=' + str(len(seed)))
    result = m.generate(steps, temperature=temperature, seq_seed=seed[:history])
    print(result)
    return result

def get_network(frames, input_size, num_classes):
    """Create our LSTM"""
    net = tflearn.input_data(shape=[None, frames, input_size])
    net = tflearn.lstm(net, 128, dropout=0.8, return_seq=True)
    net = tflearn.lstm(net, 128)
    net = tflearn.fully_connected(net, num_classes, activation='softmax')
    net = tflearn.regression(net, optimizer='adam',
                             loss='categorical_crossentropy', name="output1")
    return net

def get_network_deep(frames, input_size, num_classes):
    """Create a deeper LSTM"""
    net = tflearn.input_data(shape=[None, frames, input_size])
    net = tflearn.lstm(net, 64, dropout=0.2, return_seq=True)
    net = tflearn.lstm(net, 64, dropout=0.2, return_seq=True)
    net = tflearn.lstm(net, 64, dropout=0.2)
    net = tflearn.fully_connected(net, num_classes, activation='softmax')
    net = tflearn.regression(net, optimizer='adam',
                             loss='categorical_crossentropy', name="output1")
    return net

def get_network_wide(frames, input_size, num_classes):
    """Create a wider LSTM"""
    net = tflearn.input_data(shape=[None, frames, input_size])
    net = tflearn.lstm(net, 256, dropout=0.2)
    net = tflearn.fully_connected(net, num_classes, activation='softmax')
    net = tflearn.regression(net, optimizer='adam',
                             loss='categorical_crossentropy', name='output1')
    return net

def get_network_wider(frames, input_size, num_classes):
    """Create a wider LSTM"""
    net = tflearn.input_data(shape=[None, frames, input_size])
    net = tflearn.lstm(net, 512, dropout=0.2)
    net = tflearn.fully_connected(net, num_classes, activation='softmax')
    net = tflearn.regression(net, optimizer='adam',
                             loss='categorical_crossentropy', name='output1')
    return net

def __init__(self, s_date, n_frame):
    self.n_epoch = 20
    prev_bd = int(s_date[:6]) - 1
    prev_ed = int(s_date[9:15]) - 1
    if prev_bd % 100 == 0:
        prev_bd -= 98
    if prev_ed % 100 == 0:
        prev_ed -= 98
    pred_s_date = "%d01_%d01" % (prev_bd, prev_ed)
    prev_model = '../model/tflearn/reg_l3_bn/big/%s' % pred_s_date
    self.model_dir = '../model/tflearn/reg_l3_bn/big/%s' % s_date
    tf.reset_default_graph()
    tflearn.init_graph(gpu_memory_fraction=0.1)
    input_layer = tflearn.input_data(shape=[None, 23 * n_frame], name='input')
    dense1 = tflearn.fully_connected(input_layer, 400, name='dense1', activation='relu')
    dense1n = tflearn.batch_normalization(dense1, name='BN1')
    dense2 = tflearn.fully_connected(dense1n, 100, name='dense2', activation='relu')
    dense2n = tflearn.batch_normalization(dense2, name='BN2')
    dense3 = tflearn.fully_connected(dense2n, 1, name='dense3')
    output = tflearn.single_unit(dense3)
    regression = tflearn.regression(output, optimizer='adam', loss='mean_square',
                                    metric='R2', learning_rate=0.001)
    self.estimators = tflearn.DNN(regression)
    if os.path.exists('%s/model.tfl' % prev_model):
        self.estimators.load('%s/model.tfl' % prev_model)
        self.n_epoch = 10
    if not os.path.exists(self.model_dir):
        os.makedirs(self.model_dir)

def build(embedding_size=(400000, 50), train_embedding=False, hidden_dims=128,
          learning_rate=0.001):
    net = tflearn.input_data([None, 200])
    net = tflearn.embedding(net, input_dim=embedding_size[0],
                            output_dim=embedding_size[1],
                            trainable=train_embedding, name='EmbeddingLayer')
    net = tflearn.lstm(net, hidden_dims)
    net = tflearn.dropout(net, 0.5)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='adam', learning_rate=learning_rate,
                             loss='categorical_crossentropy')
    return net

def generate_net(embedding):
    net = tflearn.input_data([None, 200])
    net = tflearn.embedding(net, input_dim=300000, output_dim=128)
    net = tflearn.lstm(net, 128)
    net = tflearn.dropout(net, 0.5)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='adam', loss='categorical_crossentropy')
    return net

def sentnet_color_2d(width, height, frame_count, lr, output=9,
                     model_name='sentnet_color.model'):
    network = input_data(shape=[None, width, height, 3], name='input')
    network = conv_2d(network, 96, 11, strides=4, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 256, 5, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 256, 3, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = conv_2d(network, 256, 5, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 256, 3, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, output, activation='softmax')
    network = regression(network, optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=lr, name='targets')
    model = tflearn.DNN(network, max_checkpoints=0, tensorboard_verbose=0,
                        tensorboard_dir='log')
    return model

def sentnet_color(width, height, frame_count, lr, output=9,
                  model_name='sentnet_color.model'):
    network = input_data(shape=[None, width, height, 3, 1], name='input')
    network = conv_3d(network, 96, 11, strides=4, activation='relu')
    network = max_pool_3d(network, 3, strides=2)
    #network = local_response_normalization(network)
    network = conv_3d(network, 256, 5, activation='relu')
    network = max_pool_3d(network, 3, strides=2)
    #network = local_response_normalization(network)
    network = conv_3d(network, 384, 3, activation='relu')
    network = conv_3d(network, 384, 3, activation='relu')
    network = conv_3d(network, 256, 3, activation='relu')
    network = max_pool_3d(network, 3, strides=2)
    network = conv_3d(network, 256, 5, activation='relu')
    network = max_pool_3d(network, 3, strides=2)
    #network = local_response_normalization(network)
    network = conv_3d(network, 384, 3, activation='relu')
    network = conv_3d(network, 384, 3, activation='relu')
    network = conv_3d(network, 256, 3, activation='relu')
    network = max_pool_3d(network, 3, strides=2)
    #network = local_response_normalization(network)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, output, activation='softmax')
    network = regression(network, optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=lr, name='targets')
    model = tflearn.DNN(network, checkpoint_path=model_name, max_checkpoints=1,
                        tensorboard_verbose=0, tensorboard_dir='log')
    return model

def sentnet_frames(width, height, frame_count, lr, output=9):
    network = input_data(shape=[None, width, height, frame_count, 1], name='input')
    network = conv_3d(network, 96, 11, strides=4, activation='relu')
    network = max_pool_3d(network, 3, strides=2)
    #network = local_response_normalization(network)
    network = conv_3d(network, 256, 5, activation='relu')
    network = max_pool_3d(network, 3, strides=2)
    #network = local_response_normalization(network)
    network = conv_3d(network, 384, 3, activation='relu')
    network = conv_3d(network, 384, 3, activation='relu')
    network = conv_3d(network, 256, 3, activation='relu')
    network = max_pool_3d(network, 3, strides=2)
    network = conv_3d(network, 256, 5, activation='relu')
    network = max_pool_3d(network, 3, strides=2)
    #network = local_response_normalization(network)
    network = conv_3d(network, 384, 3, activation='relu')
    network = conv_3d(network, 384, 3, activation='relu')
    network = conv_3d(network, 256, 3, activation='relu')
    network = max_pool_3d(network, 3, strides=2)
    #network = local_response_normalization(network)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, output, activation='softmax')
    network = regression(network, optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=lr, name='targets')
    model = tflearn.DNN(network, checkpoint_path='model_alexnet', max_checkpoints=1,
                        tensorboard_verbose=0, tensorboard_dir='log')
    return model

def sentnet(width, height, frame_count, lr, output=9):
    network = input_data(shape=[None, width, height, frame_count, 1], name='input')
    network = conv_3d(network, 96, 11, strides=4, activation='relu')
    network = avg_pool_3d(network, 3, strides=2)
    #network = local_response_normalization(network)
    network = conv_3d(network, 256, 5, activation='relu')
    network = avg_pool_3d(network, 3, strides=2)
    #network = local_response_normalization(network)
    network = conv_3d(network, 384, 3, activation='relu')
    network = conv_3d(network, 384, 3, activation='relu')
    network = conv_3d(network, 256, 3, activation='relu')
    network = max_pool_3d(network, 3, strides=2)
    network = conv_3d(network, 256, 5, activation='relu')
    network = avg_pool_3d(network, 3, strides=2)
    #network = local_response_normalization(network)
    network = conv_3d(network, 384, 3, activation='relu')
    network = conv_3d(network, 384, 3, activation='relu')
    network = conv_3d(network, 256, 3, activation='relu')
    network = avg_pool_3d(network, 3, strides=2)
    #network = local_response_normalization(network)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, output, activation='softmax')
    network = regression(network, optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=lr, name='targets')
    model = tflearn.DNN(network, checkpoint_path='model_alexnet', max_checkpoints=1,
                        tensorboard_verbose=0, tensorboard_dir='log')
    return model

def alexnet2(width, height, lr, output=3):
    network = input_data(shape=[None, width, height, 1], name='input')
    network = conv_2d(network, 96, 11, strides=4, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 256, 5, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 256, 3, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = conv_2d(network, 256, 5, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 256, 3, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, output, activation='softmax')
    network = regression(network, optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=lr, name='targets')
    model = tflearn.DNN(network, checkpoint_path='model_alexnet', max_checkpoints=1,
                        tensorboard_verbose=0, tensorboard_dir='log')
    return model

def sentnet_v0(width, height, frame_count, lr, output=9):
    network = input_data(shape=[None, width, height, frame_count, 1], name='input')
    network = conv_3d(network, 96, 11, strides=4, activation='relu')
    network = max_pool_3d(network, 3, strides=2)
    #network = local_response_normalization(network)
    network = conv_3d(network, 256, 5, activation='relu')
    network = max_pool_3d(network, 3, strides=2)
    #network = local_response_normalization(network)
    network = conv_3d(network, 384, 3, 3, activation='relu')
    network = conv_3d(network, 384, 3, 3, activation='relu')
    network = conv_3d(network, 256, 3, 3, activation='relu')
    network = max_pool_3d(network, 3, strides=2)
    #network = local_response_normalization(network)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, output, activation='softmax')
    network = regression(network, optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=lr, name='targets')
    model = tflearn.DNN(network, checkpoint_path='model_alexnet', max_checkpoints=1,
                        tensorboard_verbose=0, tensorboard_dir='log')
    return model

def alexnet(width, height, lr, output=3):
    network = input_data(shape=[None, width, height, 1], name='input')
    network = conv_2d(network, 96, 11, strides=4, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 256, 5, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 256, 3, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, output, activation='softmax')
    network = regression(network, optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=lr, name='targets')
    model = tflearn.DNN(network, checkpoint_path='model_alexnet', max_checkpoints=1,
                        tensorboard_verbose=0, tensorboard_dir='log')
    return model

def sentnet2(width, height, frame_count, lr, output=9):
    network = input_data(shape=[None, width, height, frame_count, 1], name='input')
    network = conv_3d(network, 96, 11, strides=4, activation='relu')
    network = max_pool_3d(network, 3, strides=2)
    #network = local_response_normalization(network)
    network = conv_3d(network, 256, 5, activation='relu')
    network = max_pool_3d(network, 3, strides=2)
    #network = local_response_normalization(network)
    network = conv_3d(network, 384, 3, activation='relu')
    network = conv_3d(network, 384, 3, activation='relu')
    network = conv_3d(network, 256, 3, activation='relu')
    network = max_pool_3d(network, 3, strides=2)
    #network = local_response_normalization(network)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 3, activation='softmax')
    network = regression(network, optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=lr, name='targets')
    model = tflearn.DNN(network, checkpoint_path='model_alexnet', max_checkpoints=1,
                        tensorboard_verbose=0, tensorboard_dir='log')
    return model