The following 6 code examples, extracted from open-source Python projects, illustrate how to use tflearn.SequenceGenerator().
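Before the extracted examples, here is a minimal, self-contained sketch of the typical SequenceGenerator workflow (the toy corpus and layer sizes are made up for illustration; only documented tflearn calls are used):

import tflearn
from tflearn.data_utils import string_to_semi_redundant_sequences

text = "hello world " * 50           # toy corpus, made up for illustration
maxlen = 10
X, Y, char_idx = string_to_semi_redundant_sequences(text, seq_maxlen=maxlen,
                                                    redun_step=3)

g = tflearn.input_data([None, maxlen, len(char_idx)])
g = tflearn.lstm(g, 64)
g = tflearn.fully_connected(g, len(char_idx), activation='softmax')
g = tflearn.regression(g, optimizer='adam', loss='categorical_crossentropy')

m = tflearn.SequenceGenerator(g, dictionary=char_idx, seq_maxlen=maxlen)
m.fit(X, Y, n_epoch=1, snapshot_epoch=False)
print(m.generate(30, temperature=1.0, seq_seed=text[:maxlen]))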
def build_model(maxlen, char_idx, checkpoint_path):
    # Three stacked 512-unit LSTM layers, each followed by dropout,
    # ending in a softmax over the character vocabulary.
    g = tflearn.input_data([None, maxlen, len(char_idx)])
    g = tflearn.lstm(g, 512, return_seq=True)
    g = tflearn.dropout(g, 0.5)
    g = tflearn.lstm(g, 512, return_seq=True)
    g = tflearn.dropout(g, 0.5)
    g = tflearn.lstm(g, 512)
    g = tflearn.dropout(g, 0.5)
    g = tflearn.fully_connected(g, len(char_idx), activation='softmax')
    g = tflearn.regression(g, optimizer='adam',
                           loss='categorical_crossentropy',
                           learning_rate=0.001)
    return tflearn.SequenceGenerator(g, dictionary=char_idx,
                                     seq_maxlen=maxlen,
                                     clip_gradients=5.0,
                                     checkpoint_path=checkpoint_path)
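The helper above only assembles the graph and wraps it in a SequenceGenerator; training and sampling are left to the caller. A minimal sketch of that calling pattern (the vocabulary, data, and checkpoint path below are hypothetical):

char_idx = {c: i for i, c in enumerate(" abcdefghijklmnopqrstuvwxyz")}  # hypothetical vocabulary
maxlen = 25
m = build_model(maxlen, char_idx, checkpoint_path='model_checkpoint')
# X, Y would be one-hot tensors of shape [n, maxlen, len(char_idx)] and [n, len(char_idx)]:
# m.fit(X, Y, n_epoch=10)
# print(m.generate(100, temperature=0.5, seq_seed=("some seed text" * 3)[:maxlen]))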
def test_sequencegenerator(self):

    with tf.Graph().as_default():
        text = "123456789101234567891012345678910123456789101234567891012345678910"
        maxlen = 5

        X, Y, char_idx = \
            tflearn.data_utils.string_to_semi_redundant_sequences(
                text, seq_maxlen=maxlen, redun_step=3)

        g = tflearn.input_data(shape=[None, maxlen, len(char_idx)])
        g = tflearn.lstm(g, 32)
        g = tflearn.dropout(g, 0.5)
        g = tflearn.fully_connected(g, len(char_idx), activation='softmax')
        g = tflearn.regression(g, optimizer='adam',
                               loss='categorical_crossentropy',
                               learning_rate=0.1)

        m = tflearn.SequenceGenerator(g, dictionary=char_idx,
                                      seq_maxlen=maxlen,
                                      clip_gradients=5.0)
        m.fit(X, Y, validation_set=0.1, n_epoch=100, snapshot_epoch=False)
        res = m.generate(10, temperature=.5, seq_seed="12345")
        #self.assertEqual(res, "123456789101234", "SequenceGenerator test failed! Generated sequence: " + res + " expected '123456789101234'")

        # Testing save method
        m.save("test_seqgen.tflearn")
        self.assertTrue(os.path.exists("test_seqgen.tflearn.index"))

        # Testing load method
        m.load("test_seqgen.tflearn")
        res = m.generate(10, temperature=.5, seq_seed="12345")
        # TODO: Fix test
        #self.assertEqual(res, "123456789101234", "SequenceGenerator test failed after loading model! Generated sequence: " + res + " expected '123456789101234'")
def CharacterLSTM_Run(seed, dictionary, model, output, steps=600, layers=3,
                      hidden_nodes=512, history=25, temperature=0.5,
                      dropout=False):
    char_idx_file = dictionary
    maxlen = history

    # Reload the character-to-index mapping saved at training time.
    char_idx = None
    if os.path.isfile(char_idx_file):
        print('Loading previous char_idx')
        char_idx = pickle.load(open(char_idx_file, 'rb'))

    tf.reset_default_graph()
    g = buildModel(layers, hidden_nodes, maxlen, char_idx, dropout)
    '''
    g = tflearn.input_data([None, maxlen, len(char_idx)])
    for n in range(layers - 1):
        g = tflearn.lstm(g, hidden_nodes, return_seq=True)
        if dropout:
            g = tflearn.dropout(g, 0.5)
    g = tflearn.lstm(g, hidden_nodes)
    if dropout:
        g = tflearn.dropout(g, 0.5)
    g = tflearn.fully_connected(g, len(char_idx), activation='softmax')
    g = tflearn.regression(g, optimizer='adam',
                           loss='categorical_crossentropy',
                           learning_rate=0.001)
    '''

    m = tflearn.SequenceGenerator(g, dictionary=char_idx,
                                  seq_maxlen=maxlen,
                                  clip_gradients=5.0)
                                  #, checkpoint_path='model_history_gen')
    m.load(model)

    #seed = random_sequence_from_textfile(data, maxlen)
    print('seed=' + seed)
    print('len=' + str(len(seed)))

    # The first `history` characters of the seed prime the generator.
    result = m.generate(steps, temperature=temperature,
                        seq_seed=seed[:history])
    print(result)
    return result
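A hypothetical invocation (the file names are placeholders; the dictionary and model files are the ones written by CharacterLSTM_Train below, and the seed must be at least `history` characters long):

sample = CharacterLSTM_Run(seed="the quick brown fox jumps over the lazy dog",
                           dictionary='char_idx.pickle',  # placeholder path
                           model='corpus.tfl',            # placeholder path
                           output=None, steps=300, temperature=0.7)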
def initialize_model(self):
    char_idx_file = 'char_idx.pickle'
    maxlen = 25

    char_idx = None
    if os.path.isfile(char_idx_file):
        print('Loading previous char_idx')
        char_idx = pickle.load(open(char_idx_file, 'rb'))

    # `path` (the training text file) is expected to be defined in the
    # enclosing module.
    X, Y, char_idx = textfile_to_semi_redundant_sequences(
        path, seq_maxlen=maxlen, redun_step=3,
        pre_defined_char_idx=char_idx)

    g = tflearn.input_data([None, maxlen, len(char_idx)])
    g = tflearn.lstm(g, 512, return_seq=True)
    g = tflearn.dropout(g, 0.5)
    g = tflearn.lstm(g, 512, return_seq=True)
    g = tflearn.dropout(g, 0.5)
    g = tflearn.lstm(g, 512)
    g = tflearn.dropout(g, 0.5)
    g = tflearn.fully_connected(g, len(char_idx), activation='softmax')
    g = tflearn.regression(g, optimizer='adam',
                           loss='categorical_crossentropy',
                           learning_rate=0.01)

    m = tflearn.SequenceGenerator(g, dictionary=char_idx,
                                  seq_maxlen=maxlen,
                                  clip_gradients=5.0,
                                  checkpoint_path='model_tweets')

    # Load the model
    m.load("model.tfl")
    self.__text_model = m
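Once initialize_model has run, the stored generator can be sampled. A hypothetical companion method on the same class (the name and defaults below are assumptions, not part of the original code):

def generate_tweet(self, seed, length=140, temperature=0.5):
    # Hypothetical helper: the first 25 characters of the seed prime the
    # generator, matching the maxlen used in initialize_model above.
    return self.__text_model.generate(length, temperature=temperature,
                                      seq_seed=seed[:25])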
def test_sequencegenerator_words(self):

    with tf.Graph().as_default():
        text = ["hello", "world"] * 100
        word_idx = {"hello": 0, "world": 1}
        maxlen = 2

        vec = [x for x in map(word_idx.get, text) if x is not None]

        sequences = []
        next_words = []
        for i in range(0, len(vec) - maxlen, 3):
            sequences.append(vec[i: i + maxlen])
            next_words.append(vec[i + maxlen])

        X = np.zeros((len(sequences), maxlen, len(word_idx)), dtype=np.bool)
        Y = np.zeros((len(sequences), len(word_idx)), dtype=np.bool)
        for i, seq in enumerate(sequences):
            for t, idx in enumerate(seq):
                X[i, t, idx] = True
            Y[i, next_words[i]] = True

        g = tflearn.input_data(shape=[None, maxlen, len(word_idx)])
        g = tflearn.lstm(g, 32)
        g = tflearn.dropout(g, 0.5)
        g = tflearn.fully_connected(g, len(word_idx), activation='softmax')
        g = tflearn.regression(g, optimizer='adam',
                               loss='categorical_crossentropy',
                               learning_rate=0.1)

        m = tflearn.SequenceGenerator(g, dictionary=word_idx,
                                      seq_maxlen=maxlen,
                                      clip_gradients=5.0)
        m.fit(X, Y, validation_set=0.1, n_epoch=100, snapshot_epoch=False)
        res = m.generate(4, temperature=.5, seq_seed=["hello", "world"])
        res_str = " ".join(res[-2:])
        self.assertEqual(res_str, "hello world", "SequenceGenerator (word level) test failed! Generated sequence: " + res_str + " expected 'hello world'")

        # Testing save method
        m.save("test_seqgen_word.tflearn")
        self.assertTrue(os.path.exists("test_seqgen_word.tflearn.index"))

        # Testing load method
        m.load("test_seqgen_word.tflearn")
        res = m.generate(4, temperature=.5, seq_seed=["hello", "world"])
        res_str = " ".join(res[-2:])
        self.assertEqual(res_str, "hello world", "Reloaded SequenceGenerator (word level) test failed! Generated sequence: " + res_str + " expected 'hello world'")
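Note that at the word level, seq_seed is a list of tokens rather than a string. The hand-written one-hot loop in this test generalizes to any token vocabulary; here is a small standalone helper capturing it (an assumed refactoring for reuse, not part of the test):

import numpy as np

def one_hot_sequences(sequences, next_tokens, vocab_size, maxlen):
    # Encode each index sequence as a [maxlen, vocab_size] one-hot matrix
    # and each next-token as a one-hot target row.
    X = np.zeros((len(sequences), maxlen, vocab_size), dtype=np.bool_)
    Y = np.zeros((len(sequences), vocab_size), dtype=np.bool_)
    for i, seq in enumerate(sequences):
        for t, idx in enumerate(seq):
            X[i, t, idx] = True
        Y[i, next_tokens[i]] = True
    return X, Y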
def CharacterLSTM_Train(data, model, dictionary, history=25, layers=3,
                        epochs=10, hidden_nodes=512, dropout=False):
    char_idx_file = dictionary
    maxlen = history

    char_idx = None
    '''
    if os.path.isfile(char_idx_file):
        print('Loading previous char_idx')
        char_idx = pickle.load(open(char_idx_file, 'rb'))
        print("---------------")
        print(char_idx)
        print(len(char_idx))
    '''

    # Build training sequences and persist the character-to-index mapping
    # so that CharacterLSTM_Run can reload it later.
    X, Y, char_idx = textfile_to_semi_redundant_sequences(
        data, seq_maxlen=maxlen, redun_step=3)
    pickle.dump(char_idx, open(dictionary, 'wb'))

    tf.reset_default_graph()
    print("layers " + str(layers) + " hidden " + str(hidden_nodes))
    '''
    g = tflearn.input_data([None, maxlen, len(char_idx)])
    for n in range(layers - 1):
        g = tflearn.lstm(g, hidden_nodes, return_seq=True)
        if dropout:
            g = tflearn.dropout(g, 0.5)
    g = tflearn.lstm(g, hidden_nodes)
    if dropout:
        g = tflearn.dropout(g, 0.5)
    g = tflearn.fully_connected(g, len(char_idx), activation='softmax')
    g = tflearn.regression(g, optimizer='adam',
                           loss='categorical_crossentropy',
                           learning_rate=0.001)
    '''
    g = buildModel(layers, hidden_nodes, maxlen, char_idx, dropout)

    m = tflearn.SequenceGenerator(g, dictionary=char_idx,
                                  seq_maxlen=maxlen,
                                  clip_gradients=5.0)
                                  #, checkpoint_path='model_history_gen')

    #if model is not None:
    #    m.load(model)

    #for i in range(epochs):
    #seed = random_sequence_from_textfile(data, maxlen)
    m.fit(X, Y, validation_set=0.1, batch_size=128,
          n_epoch=epochs, run_id='run_gen')
    print("Saving...")
    m.save(model)

    #print("-- TESTING...")
    #print("-- Test with temperature of 1.0 --")
    #print(m.generate(600, temperature=1.0, seq_seed=seed))
    #print("-- Test with temperature of 0.5 --")
    #print(m.generate(600, temperature=0.5, seq_seed=seed))

# inputs:
#   data - a text file
#   in_model - a TFLearn model file
# outputs:
#   out_model - a TFLearn model file
# params:
#   history - max length of sequence to feed into the neural net
#   layers - number of hidden layers of the network
#   epochs - how many epochs to run
#   hidden_nodes - how many nodes per hidden layer
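A hypothetical end-to-end run pairing CharacterLSTM_Train with CharacterLSTM_Run from above (all file names are placeholders):

CharacterLSTM_Train(data='corpus.txt', model='corpus.tfl',
                    dictionary='char_idx.pickle',
                    history=25, layers=3, epochs=10,
                    hidden_nodes=512, dropout=True)
sample = CharacterLSTM_Run(seed="a seed string of at least twenty-five characters",
                           dictionary='char_idx.pickle',
                           model='corpus.tfl', output=None,
                           steps=600, temperature=0.5)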