The following 27 code examples, extracted from open-source Python projects, illustrate how to use tflearn.lstm().
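Before the project code, here is a minimal, self-contained sketch of the usual call pattern around tflearn.lstm(); the input shapes, unit count, and toy data below are illustrative assumptions rather than values taken from any of the projects:

import numpy as np
import tflearn

# Toy data: 100 sequences, each with 10 timesteps of 8 features (assumed shapes).
X = np.random.rand(100, 10, 8)
Y = tflearn.data_utils.to_categorical(np.random.randint(0, 2, size=100), 2)

# Typical pipeline: input_data -> lstm -> fully_connected -> regression -> DNN.
net = tflearn.input_data(shape=[None, 10, 8])
net = tflearn.lstm(net, 64)                      # 64 hidden units; returns the last output only
net = tflearn.fully_connected(net, 2, activation='softmax')
net = tflearn.regression(net, optimizer='adam', loss='categorical_crossentropy')

model = tflearn.DNN(net)
model.fit(X, Y, n_epoch=1, batch_size=16)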
def buildModel(layers, hidden_nodes, maxlen, char_idx, dropout = False):
    g = tflearn.input_data([None, maxlen, len(char_idx)])
    for n in range(layers-1):
        g = tflearn.lstm(g, hidden_nodes, return_seq=True)
        if dropout:
            g = tflearn.dropout(g, 0.5)
    g = tflearn.lstm(g, hidden_nodes)
    if dropout:
        g = tflearn.dropout(g, 0.5)
    g = tflearn.fully_connected(g, len(char_idx), activation='softmax')
    g = tflearn.regression(g, optimizer='adam', loss='categorical_crossentropy',
                           learning_rate=0.001)
    return g

# inputs:
#    data - textfile
# outputs:
#    model - a TFlearn model file
#    dictionary - char_idx pickle
# params:
#    history - max length of sequence to feed into neural net
#    layers - number of hidden layers of the network
#    epochs - how many epochs to run
#    hidden_nodes - how many nodes per hidden layer
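As a hedged illustration only, buildModel() could be wrapped in a SequenceGenerator like this; the vocabulary, layer count, and history length are placeholder assumptions, not values from the original project:

# Hypothetical usage: a 2-layer, 256-unit character LSTM over a small assumed vocabulary.
char_idx = {c: i for i, c in enumerate("abcdefghijklmnopqrstuvwxyz0123456789 .,\n")}
g = buildModel(layers=2, hidden_nodes=256, maxlen=25, char_idx=char_idx, dropout=True)
m = tflearn.SequenceGenerator(g, dictionary=char_idx, seq_maxlen=25, clip_gradients=5.0)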
def build(embedding_size=(400000, 50), train_embedding=False, hidden_dims=128,
          learning_rate=0.001):
    net = tflearn.input_data([None, 200])
    net = tflearn.embedding(net, input_dim=embedding_size[0],
                            output_dim=embedding_size[1],
                            trainable=train_embedding, name='EmbeddingLayer')
    net = tflearn.lstm(net, hidden_dims, return_seq=True)
    net = tflearn.dropout(net, 0.5)
    net = tflearn.lstm(net, hidden_dims, return_seq=True)
    net = tflearn.dropout(net, 0.5)
    net = tflearn.lstm(net, hidden_dims, return_seq=True)
    net = tflearn.dropout(net, 0.5)
    net = tflearn.lstm(net, hidden_dims)
    net = tflearn.dropout(net, 0.5)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='adam', learning_rate=learning_rate,
                             loss='categorical_crossentropy')
    return net
def sentnet_LSTM_gray(width, height, frame_count, lr, output=9):
    network = input_data(shape=[None, width, height], name='input')
    #network = tflearn.input_data(shape=[None, 28, 28], name='input')
    network = tflearn.lstm(network, 128, return_seq=True)
    network = tflearn.lstm(network, 128)
    network = tflearn.fully_connected(network, 9, activation='softmax')
    network = tflearn.regression(network, optimizer='adam',
                                 loss='categorical_crossentropy', name="output1")

    model = tflearn.DNN(network, checkpoint_path='model_lstm',
                        max_checkpoints=1, tensorboard_verbose=0, tensorboard_dir='log')

    return model
def basic_pony(self, hyp, pd):
    net = tflearn.input_data([None, pd.max_sequence], dtype=tf.float32)
    net = tflearn.embedding(net, input_dim=pd.vocab_size, output_dim=pd.emb_size,
                            name="embedding")
    net = tflearn.lstm(net, 32, dynamic=False, name="lstm")
    net = tflearn.fully_connected(net, 2, activation='softmax', name="output",
                                  restore=True)
    net = tflearn.regression(net, optimizer='adam',
                             learning_rate=hyp.regression.learning_rate,
                             loss='categorical_crossentropy')
    return net
def little_pony(self, hyp, pd):
    net = tflearn.input_data([None, pd.max_sequence], dtype=tf.float32)
    net = tflearn.embedding(net, input_dim=pd.vocab_size, output_dim=pd.emb_size,
                            name="embedding")
    net = tflearn.lstm(net, 256, dynamic=True, name="lstm")
    net = tflearn.fully_connected(net, 2, activation='softmax', name="output",
                                  restore=True)
    net = tflearn.regression(net, optimizer='adam', learning_rate=0.01,
                             loss='categorical_crossentropy')
    return net
def make_network(look_back, batch_size):
    """ Declare the layer types and sizes """
    # create deep neural network with LSTM and fully connected layers
    net = tfl.input_data(shape=[None, look_back, 1], name='input')
    net = tfl.lstm(net, 32, activation='tanh', weights_init='xavier', name='LSTM1')
    net = tfl.fully_connected(net, 20, activation='relu', name='FC1')
    # net = tfl.dropout(net, 0.5)
    net = tfl.fully_connected(net, 40, activation='relu', name='FC2')
    # net = tfl.dropout(net, 0.5)
    net = tfl.fully_connected(net, 1, activation='linear', name='Linear')
    net = tfl.regression(net, batch_size=batch_size, optimizer='adam',
                         learning_rate=0.005, loss='mean_square', name='target')

    col = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
    for x in col:
        tf.add_to_collection(tf.GraphKeys.VARIABLES, x)

    return net
def __init__(self, s_date):
    prev_bd = int(s_date[:6])-1
    prev_ed = int(s_date[9:15])-1
    if prev_bd%100 == 0: prev_bd -= 98
    if prev_ed%100 == 0: prev_ed -= 98
    pred_s_date = "%d01_%d01" % (prev_bd, prev_ed)
    prev_model = '../model/tflearn/lstm/%s' % pred_s_date
    self.model_dir = '../model/tflearn/lstm/%s' % s_date

    tf.reset_default_graph()
    tflearn.init_graph(gpu_memory_fraction=0.1)
    input_layer = tflearn.input_data(shape=[None, 30, 23], name='input')
    lstm1 = tflearn.lstm(input_layer, 23, dynamic=True, name='lstm1')
    dense1 = tflearn.fully_connected(lstm1, 1, name='dense1')
    output = tflearn.single_unit(dense1)
    regression = tflearn.regression(output, optimizer='adam', loss='mean_square',
                                    metric='R2', learning_rate=0.001)
    self.estimators = tflearn.DNN(regression)
    if os.path.exists('%s/model.tfl' % prev_model):
        self.estimators.load('%s/model.tfl' % prev_model)
def npi_core(self):
    """
    Build the NPI LSTM core, feeding the program embedding and state encoding to a
    multi-layered LSTM, returning the h-state of the final LSTM layer.

    References: Reed, de Freitas [2]
    """
    s_in = self.state_encoding        # Shape: [bsz, state_dim]
    p_in = self.program_embedding     # Shape: [bsz, 1, program_dim]

    # Reshape state_in
    s_in = tflearn.reshape(s_in, [-1, 1, self.state_dim])   # Shape: [bsz, 1, state_dim]

    # Concatenate s_in, p_in
    c = tflearn.merge([s_in, p_in], 'concat', axis=2)       # Shape: [bsz, 1, state + prog]

    # Feed through Multi-Layer LSTM
    for i in range(self.npi_core_layers):
        c, [self.h_states[i]] = tflearn.lstm(c, self.npi_core_dim, return_seq=True,
                                             initial_state=self.h_states[i],
                                             return_states=True)

    # Return Top-Most LSTM H-State
    top_state = tf.split(1, 2, self.h_states[-1])[1]
    return top_state                  # Shape: [bsz, npi_core_dim]
def build_model(maxlen, char_idx, checkpoint_path):
    g = tflearn.input_data([None, maxlen, len(char_idx)])
    g = tflearn.lstm(g, 512, return_seq=True)
    g = tflearn.dropout(g, 0.5)
    g = tflearn.lstm(g, 512, return_seq=True)
    g = tflearn.dropout(g, 0.5)
    g = tflearn.lstm(g, 512)
    g = tflearn.dropout(g, 0.5)
    g = tflearn.fully_connected(g, len(char_idx), activation='softmax')
    g = tflearn.regression(g, optimizer='adam', loss='categorical_crossentropy',
                           learning_rate=0.001)
    return tflearn.SequenceGenerator(g, dictionary=char_idx, seq_maxlen=maxlen,
                                     clip_gradients=5.0, checkpoint_path=checkpoint_path)
def big_boy(self, hyp, pd):
    restore = True
    net = tflearn.input_data([None, pd.max_sequence], dtype=tf.float32)
    net = tflearn.embedding(net, input_dim=pd.vocab_size, output_dim=pd.emb_size,
                            name="embedding", restore=restore)
    net = tflearn.lstm(net, 512, dropout=hyp.lstm.dropout,
                       weights_init='uniform_scaling', dynamic=True,
                       name="lstm", restore=restore)
    net = tflearn.fully_connected(net, 128, activation='sigmoid', regularizer='L2',
                                  weight_decay=hyp.middle.weight_decay,
                                  weights_init='uniform_scaling',
                                  name="middle", restore=restore)
    net = tflearn.dropout(net, hyp.dropout.dropout, name="dropout")
    net = tflearn.fully_connected(net, 2, activation='softmax', regularizer='L2',
                                  weight_decay=hyp.output.weight_decay,
                                  weights_init='uniform_scaling',
                                  name="output", restore=restore)
    net = tflearn.regression(net, optimizer='adam',
                             learning_rate=hyp.regression.learning_rate,
                             loss='categorical_crossentropy')
    return net
def test_sequencegenerator(self):

    with tf.Graph().as_default():
        text = "123456789101234567891012345678910123456789101234567891012345678910"
        maxlen = 5

        X, Y, char_idx = \
            tflearn.data_utils.string_to_semi_redundant_sequences(
                text, seq_maxlen=maxlen, redun_step=3)

        g = tflearn.input_data(shape=[None, maxlen, len(char_idx)])
        g = tflearn.lstm(g, 32)
        g = tflearn.dropout(g, 0.5)
        g = tflearn.fully_connected(g, len(char_idx), activation='softmax')
        g = tflearn.regression(g, optimizer='adam', loss='categorical_crossentropy',
                               learning_rate=0.1)

        m = tflearn.SequenceGenerator(g, dictionary=char_idx, seq_maxlen=maxlen,
                                      clip_gradients=5.0)
        m.fit(X, Y, validation_set=0.1, n_epoch=100, snapshot_epoch=False)
        res = m.generate(10, temperature=.5, seq_seed="12345")
        #self.assertEqual(res, "123456789101234", "SequenceGenerator test failed! Generated sequence: " + res + " expected '123456789101234'")

        # Testing save method
        m.save("test_seqgen.tflearn")
        self.assertTrue(os.path.exists("test_seqgen.tflearn.index"))

        # Testing load method
        m.load("test_seqgen.tflearn")
        res = m.generate(10, temperature=.5, seq_seed="12345")
        # TODO: Fix test
        #self.assertEqual(res, "123456789101234", "SequenceGenerator test failed after loading model! Generated sequence: " + res + " expected '123456789101234'")
def test_recurrent_layers(self):

    X = [[1, 3, 5, 7], [2, 4, 8, 10], [1, 5, 9, 11], [2, 6, 8, 0]]
    Y = [[0., 1.], [1., 0.], [0., 1.], [1., 0.]]

    with tf.Graph().as_default():
        g = tflearn.input_data(shape=[None, 4])
        g = tflearn.embedding(g, input_dim=12, output_dim=4)
        g = tflearn.lstm(g, 6)
        g = tflearn.fully_connected(g, 2, activation='softmax')
        g = tflearn.regression(g, optimizer='sgd', learning_rate=1.)

        m = tflearn.DNN(g)
        m.fit(X, Y, n_epoch=300, snapshot_epoch=False)
        self.assertGreater(m.predict([[5, 9, 11, 1]])[0][1], 0.9)
def CharacterLSTM_Run(seed, dictionary, model, output, steps = 600, layers = 3,
                      hidden_nodes = 512, history = 25, temperature = 0.5, dropout = False):
    char_idx_file = dictionary
    maxlen = history

    char_idx = None
    if os.path.isfile(char_idx_file):
        print('Loading previous char_idx')
        char_idx = pickle.load(open(char_idx_file, 'rb'))

    tf.reset_default_graph()
    g = buildModel(layers, hidden_nodes, maxlen, char_idx, dropout)
    '''
    g = tflearn.input_data([None, maxlen, len(char_idx)])
    for n in range(layers-1):
        g = tflearn.lstm(g, hidden_nodes, return_seq=True)
        if dropout:
            g = tflearn.dropout(g, 0.5)
    g = tflearn.lstm(g, hidden_nodes)
    if dropout:
        g = tflearn.dropout(g, 0.5)
    g = tflearn.fully_connected(g, len(char_idx), activation='softmax')
    g = tflearn.regression(g, optimizer='adam', loss='categorical_crossentropy',
                           learning_rate=0.001)
    '''

    m = tflearn.SequenceGenerator(g, dictionary=char_idx, seq_maxlen=maxlen,
                                  clip_gradients=5.0)  #, checkpoint_path='model_history_gen')
    m.load(model)

    #seed = random_sequence_from_textfile(data, maxlen)
    print('seed='+seed)
    print('len=' + str(len(seed)))
    result = m.generate(steps, temperature=temperature, seq_seed=seed[:history])
    print (result)
    return result
def get_network(frames, input_size, num_classes):
    """Create our LSTM"""
    net = tflearn.input_data(shape=[None, frames, input_size])
    net = tflearn.lstm(net, 128, dropout=0.8, return_seq=True)
    net = tflearn.lstm(net, 128)
    net = tflearn.fully_connected(net, num_classes, activation='softmax')
    net = tflearn.regression(net, optimizer='adam',
                             loss='categorical_crossentropy', name="output1")
    return net
def get_network_deep(frames, input_size, num_classes):
    """Create a deeper LSTM"""
    net = tflearn.input_data(shape=[None, frames, input_size])
    net = tflearn.lstm(net, 64, dropout=0.2, return_seq=True)
    net = tflearn.lstm(net, 64, dropout=0.2, return_seq=True)
    net = tflearn.lstm(net, 64, dropout=0.2)
    net = tflearn.fully_connected(net, num_classes, activation='softmax')
    net = tflearn.regression(net, optimizer='adam',
                             loss='categorical_crossentropy', name="output1")
    return net
def get_network_wide(frames, input_size, num_classes):
    """Create a wider LSTM"""
    net = tflearn.input_data(shape=[None, frames, input_size])
    net = tflearn.lstm(net, 256, dropout=0.2)
    net = tflearn.fully_connected(net, num_classes, activation='softmax')
    net = tflearn.regression(net, optimizer='adam',
                             loss='categorical_crossentropy', name='output1')
    return net
def get_network_wider(frames, input_size, num_classes):
    """Create a wider LSTM"""
    net = tflearn.input_data(shape=[None, frames, input_size])
    net = tflearn.lstm(net, 512, dropout=0.2)
    net = tflearn.fully_connected(net, num_classes, activation='softmax')
    net = tflearn.regression(net, optimizer='adam',
                             loss='categorical_crossentropy', name='output1')
    return net
def build(embedding_size=(400000, 50), train_embedding=False, hidden_dims=128,
          learning_rate=0.001):
    net = tflearn.input_data([None, 200])
    net = tflearn.embedding(net, input_dim=embedding_size[0],
                            output_dim=embedding_size[1],
                            trainable=train_embedding, name='EmbeddingLayer')
    net = tflearn.lstm(net, hidden_dims)
    net = tflearn.dropout(net, 0.5)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='adam', learning_rate=learning_rate,
                             loss='categorical_crossentropy')
    return net
def generate_net(embedding):
    net = tflearn.input_data([None, 200])
    net = tflearn.embedding(net, input_dim=300000, output_dim=128)
    net = tflearn.lstm(net, 128)
    net = tflearn.dropout(net, 0.5)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='adam', loss='categorical_crossentropy')
    return net
def initialize_model(self):
    char_idx_file = 'char_idx.pickle'
    maxlen = 25

    char_idx = None
    if os.path.isfile(char_idx_file):
        print('Loading previous char_idx')
        char_idx = pickle.load(open(char_idx_file, 'rb'))

    X, Y, char_idx = textfile_to_semi_redundant_sequences(
        path, seq_maxlen=maxlen, redun_step=3, pre_defined_char_idx=char_idx)

    g = tflearn.input_data([None, maxlen, len(char_idx)])
    g = tflearn.lstm(g, 512, return_seq=True)
    g = tflearn.dropout(g, 0.5)
    g = tflearn.lstm(g, 512, return_seq=True)
    g = tflearn.dropout(g, 0.5)
    g = tflearn.lstm(g, 512)
    g = tflearn.dropout(g, 0.5)
    g = tflearn.fully_connected(g, len(char_idx), activation='softmax')
    g = tflearn.regression(g, optimizer='adam', loss='categorical_crossentropy',
                           learning_rate=0.01)

    m = tflearn.SequenceGenerator(g, dictionary=char_idx, seq_maxlen=maxlen,
                                  clip_gradients=5.0, checkpoint_path='model_tweets')

    # Load the model
    m.load("model.tfl")
    self.__text_model = m
def train(parameters):
    model, X, Y = prepare(parameters, dataset_needed=True)
    for i in range(50):
        model.fit(X, Y, validation_set=0.1, batch_size=128, n_epoch=1, run_id='lstm')
        print("-- TESTING...")
        seed = random_sequence_from_textfile(parameters['--input'], parameters['--maxlen'])
        print("-- Test with temperature of 1.0 --")
        print(model.generate(600, temperature=1.0, seq_seed=seed))
        print("-- Test with temperature of 0.5 --")
        print(model.generate(600, temperature=0.5, seq_seed=seed))
        print("-- Test with temperature of 0.25 --")
        print(model.generate(600, temperature=0.25, seq_seed=seed))
    f.close()
def spectacular_bid(self, hyp, pd):
    net = tflearn.input_data([None, pd.max_sequence], dtype=tf.float32)
    net = tflearn.embedding(net, input_dim=pd.vocab_size, output_dim=pd.emb_size,
                            name="embedding")
    net = tflearn.lstm(net, 750, dynamic=True, name="lstm_1", return_seq=True,
                       dropout=hyp.lstm.dropout)
    net = tflearn.dropout(net, hyp.dropout.dropout, name="dropout")
    net = tflearn.lstm(net, 750, name="lstm_2", return_seq=False)
    net = tflearn.fully_connected(net, 2, activation='softmax', name="output",
                                  regularizer='L2',
                                  weight_decay=hyp.output.weight_decay)
    net = tflearn.regression(net, optimizer='adam',
                             learning_rate=hyp.regression.learning_rate,
                             loss='categorical_crossentropy')
    return net
def test_sequencegenerator_words(self):

    with tf.Graph().as_default():
        text = ["hello","world"]*100
        word_idx = {"hello": 0, "world": 1}
        maxlen = 2

        vec = [x for x in map(word_idx.get, text) if x is not None]

        sequences = []
        next_words = []
        for i in range(0, len(vec) - maxlen, 3):
            sequences.append(vec[i: i + maxlen])
            next_words.append(vec[i + maxlen])

        X = np.zeros((len(sequences), maxlen, len(word_idx)), dtype=np.bool)
        Y = np.zeros((len(sequences), len(word_idx)), dtype=np.bool)
        for i, seq in enumerate(sequences):
            for t, idx in enumerate(seq):
                X[i, t, idx] = True
                Y[i, next_words[i]] = True

        g = tflearn.input_data(shape=[None, maxlen, len(word_idx)])
        g = tflearn.lstm(g, 32)
        g = tflearn.dropout(g, 0.5)
        g = tflearn.fully_connected(g, len(word_idx), activation='softmax')
        g = tflearn.regression(g, optimizer='adam', loss='categorical_crossentropy',
                               learning_rate=0.1)

        m = tflearn.SequenceGenerator(g, dictionary=word_idx, seq_maxlen=maxlen,
                                      clip_gradients=5.0)
        m.fit(X, Y, validation_set=0.1, n_epoch=100, snapshot_epoch=False)
        res = m.generate(4, temperature=.5, seq_seed=["hello","world"])
        res_str = " ".join(res[-2:])
        self.assertEqual(res_str, "hello world", "SequenceGenerator (word level) test failed! Generated sequence: " + res_str + " expected 'hello world'")

        # Testing save method
        m.save("test_seqgen_word.tflearn")
        self.assertTrue(os.path.exists("test_seqgen_word.tflearn.index"))

        # Testing load method
        m.load("test_seqgen_word.tflearn")
        res = m.generate(4, temperature=.5, seq_seed=["hello","world"])
        res_str = " ".join(res[-2:])
        self.assertEqual(res_str, "hello world", "Reloaded SequenceGenerator (word level) test failed! Generated sequence: " + res_str + " expected 'hello world'")
def CharacterLSTM_Train(data, model, dictionary, history = 25, layers = 3, epochs = 10,
                        hidden_nodes = 512, dropout = False):
    char_idx_file = dictionary
    maxlen = history

    char_idx = None
    '''
    if os.path.isfile(char_idx_file):
        print('Loading previous char_idx')
        char_idx = pickle.load(open(char_idx_file, 'rb'))
        print("---------------")
        print(char_idx)
        print(len(char_idx))
    '''

    X, Y, char_idx = textfile_to_semi_redundant_sequences(data, seq_maxlen=maxlen,
                                                          redun_step=3)
    pickle.dump(char_idx, open(dictionary,'wb'))

    tf.reset_default_graph()
    print("layers " + str(layers) + " hidden " + str(hidden_nodes))
    '''
    g = tflearn.input_data([None, maxlen, len(char_idx)])
    for n in range(layers-1):
        g = tflearn.lstm(g, hidden_nodes, return_seq=True)
        if dropout:
            g = tflearn.dropout(g, 0.5)
    g = tflearn.lstm(g, hidden_nodes)
    if dropout:
        g = tflearn.dropout(g, 0.5)
    g = tflearn.fully_connected(g, len(char_idx), activation='softmax')
    g = tflearn.regression(g, optimizer='adam', loss='categorical_crossentropy',
                           learning_rate=0.001)
    '''
    g = buildModel(layers, hidden_nodes, maxlen, char_idx, dropout)

    m = tflearn.SequenceGenerator(g, dictionary=char_idx, seq_maxlen=maxlen,
                                  clip_gradients=5.0)  #, checkpoint_path='model_history_gen')

    #if model is not None:
    #    m.load(model)

    #for i in range(epochs):
    #seed = random_sequence_from_textfile(data, maxlen)
    m.fit(X, Y, validation_set=0.1, batch_size=128, n_epoch=epochs, run_id='run_gen')
    print("Saving...")
    m.save(model)

    #print("-- TESTING...")
    #print("-- Test with temperature of 1.0 --")
    #print(m.generate(600, temperature=1.0, seq_seed=seed))
    #print("-- Test with temperature of 0.5 --")
    #print(m.generate(600, temperature=0.5, seq_seed=seed))

# inputs:
#    data - textfile
#    in_model - a TFLearn model file
# outputs:
#    out_model - a TFlearn model file
# params:
#    history - max length of sequence to feed into neural net
#    layers - number of hidden layers of the network
#    epochs - how many epochs to run
#    hidden_nodes - how many nodes per hidden layer
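As a hedged end-to-end sketch, CharacterLSTM_Train() above and the earlier CharacterLSTM_Run() might be driven together as follows; the file paths, seed text, and hyperparameters are placeholder assumptions, not values from the original project:

# Hypothetical driver script; paths, seed, and hyperparameters are illustrative only.
CharacterLSTM_Train(data='corpus.txt', model='char_lstm.tfl', dictionary='char_idx.pickle',
                    history=25, layers=3, epochs=10, hidden_nodes=512, dropout=True)

seed = "the quick brown fox jumps"   # any seed at least `history` characters long
text = CharacterLSTM_Run(seed, dictionary='char_idx.pickle', model='char_lstm.tfl',
                         output=None, steps=600, temperature=0.5)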