Python tflearn module: embedding() code examples

The following 15 code examples, extracted from open-source Python projects, illustrate how to use tflearn.embedding().
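
All of these examples follow the same pattern: an integer token-id input layer feeds tflearn.embedding(incoming, input_dim=vocab_size, output_dim=emb_size), and the resulting sequence of vectors is consumed by a recurrent or convolutional stack. A minimal end-to-end sketch (mirroring the tflearn test case further below; the toy data is illustrative only):

import tflearn

# toy data: four sequences of token ids (vocabulary of 12), two classes
X = [[1, 3, 5, 7], [2, 4, 8, 10], [1, 5, 9, 11], [2, 6, 8, 0]]
Y = [[0., 1.], [1., 0.], [0., 1.], [1., 0.]]

net = tflearn.input_data(shape=[None, 4])                 # padded id sequences
net = tflearn.embedding(net, input_dim=12, output_dim=4)  # 12-word vocab -> 4-d vectors
net = tflearn.lstm(net, 6)
net = tflearn.fully_connected(net, 2, activation='softmax')
net = tflearn.regression(net, optimizer='sgd', learning_rate=1.)

model = tflearn.DNN(net)
model.fit(X, Y, n_epoch=300, snapshot_epoch=False)

Snippets below that call input_data, conv_1d, merge, dropout, etc. without the tflearn. prefix assume the corresponding "from tflearn.layers... import ..." statements in their source files (e.g. from tflearn.layers.core import input_data, dropout, fully_connected).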

Project: identifiera-sarkasm (author: risnejunior)
def convolve_me(self, hyp, pd):
        network = input_data(shape=[None, pd.max_sequence], name='input')
        network = tflearn.embedding(network,
                                    input_dim=pd.vocab_size,
                                    output_dim=pd.emb_size,
                                    name="embedding")
        branch1 = conv_1d(network, 128, 3, padding='valid', activation='relu', regularizer="L2")
        branch2 = conv_1d(network, 128, 4, padding='valid', activation='relu', regularizer="L2")
        branch3 = conv_1d(network, 128, 5, padding='valid', activation='relu', regularizer="L2")
        # concatenate the three branch outputs along the time axis, then add a
        # dummy spatial dimension so the 4-D global_max_pool can take the max
        # over every time step, leaving a single 128-d feature vector
        network = merge([branch1, branch2, branch3], mode='concat', axis=1)
        network = tf.expand_dims(network, 2)
        network = global_max_pool(network)
        network = dropout(network, 0.5)
        network = fully_connected(network, 2, activation='softmax')
        network = regression(network, optimizer='adam', learning_rate=0.001,
                             loss='categorical_crossentropy', name='target')
        return network
Project: rnn-sentiment-analysis (author: kashizui)
def build(embedding_size=(400000, 50), train_embedding=False, hidden_dims=128,
          learning_rate=0.001):
    net = tflearn.input_data([None, 200])
    net = tflearn.embedding(net, input_dim=embedding_size[0],
                            output_dim=embedding_size[1],
                            trainable=train_embedding, name='EmbeddingLayer')
    net = tflearn.lstm(net, hidden_dims, return_seq=True)  # return full sequences so LSTMs can be stacked
    net = tflearn.dropout(net, 0.5)
    net = tflearn.lstm(net, hidden_dims, return_seq=True)
    net = tflearn.dropout(net, 0.5)
    net = tflearn.lstm(net, hidden_dims, return_seq=True)
    net = tflearn.dropout(net, 0.5)
    net = tflearn.lstm(net, hidden_dims)
    net = tflearn.dropout(net, 0.5)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='adam', learning_rate=learning_rate,
                             loss='categorical_crossentropy')
    return net
Project: identifiera-sarkasm (author: risnejunior)
def basic_pony(self, hyp, pd):
        net = tflearn.input_data([None, pd.max_sequence], dtype=tf.float32)
        net = tflearn.embedding(net, input_dim=pd.vocab_size,
                                     output_dim=pd.emb_size,
                                     name="embedding")
        net = tflearn.lstm(net,
                           32,
                           dynamic=False,
                           name="lstm")
        net = tflearn.fully_connected(net,
                                      2,
                                      activation='softmax',
                                      name="output",
                                      restore=True)
        net = tflearn.regression(net,
                                 optimizer='adam',
                                 learning_rate=hyp.regression.learning_rate,
                                 loss='categorical_crossentropy')
        return net
Project: identifiera-sarkasm (author: risnejunior)
def little_pony(self, hyp, pd):
        net = tflearn.input_data([None, pd.max_sequence], dtype=tf.float32)
        net = tflearn.embedding(net, input_dim=pd.vocab_size,
                                     output_dim=pd.emb_size,
                                     name="embedding")
        net = tflearn.lstm(net,
                           256,
                           dynamic=True,
                           name="lstm")
        net = tflearn.fully_connected(net,
                                      2,
                                      activation='softmax',
                                      name="output",
                                      restore=True)
        net = tflearn.regression(net,
                                 optimizer='adam',
                                 learning_rate=0.01,
                                 loss='categorical_crossentropy')
        return net
Project: identifiera-sarkasm (author: risnejunior)
def little_gru(self, hyp, pd):
        net = tflearn.input_data([None, pd.max_sequence], dtype=tf.float32)
        net = tflearn.embedding(net, input_dim=pd.vocab_size,
                                     output_dim=pd.emb_size,
                                     name="embedding")
        net = tflearn.gru(net,
                           256,
                           dynamic=True,
                           name="gru")
        net = tflearn.fully_connected(net,
                                      2,
                                      activation='softmax',
                                      name="output",
                                      restore=True)
        net = tflearn.regression(net,
                                 optimizer='adam',
                                 learning_rate=hyp.regression.learning_rate,
                                 loss='categorical_crossentropy')
        return net
Project: rnn-sentiment-analysis (author: kashizui)
def train(model_name='lstm'):  # model_name distinguishes the name string from the DNN object below
    embedding = generate_embedding()
    data = utils.load_sst('sst_data.pkl')
    net = generate_net(embedding)
    # Alternatively, a named model definition could be loaded instead:
    # print("Loading model definition for %s..." % model_name)
    # net = models.get_model(model_name)
    model = tflearn.DNN(net, clip_gradients=0., tensorboard_verbose=0)

    print("Training...")
    model.fit(data.trainX, data.trainY,
              validation_set=(data.valX, data.valY),
              show_metric=True, batch_size=128)

    print("Saving Model...")
    model_path = '%s.tflearn' % model_name
    model.save(model_path)
    print("Saved model to %s" % model_path)
Project: identifiera-sarkasm (author: risnejunior)
def big_boy(self, hyp, pd):
        restore = True
        net = tflearn.input_data([None, pd.max_sequence], dtype=tf.float32)
        net = tflearn.embedding(net, input_dim=pd.vocab_size,
                                     output_dim=pd.emb_size,
                                     name="embedding",
                                     restore=restore)

        net = tflearn.lstm(net,
                           512,
                           dropout=hyp.lstm.dropout,
                           weights_init='uniform_scaling',
                           dynamic=True,
                           name="lstm",
                           restore=restore)

        net = tflearn.fully_connected(net,
                                      128,
                                      activation='sigmoid',
                                      regularizer='L2',
                                      weight_decay=hyp.middle.weight_decay,
                                      weights_init='uniform_scaling',
                                      name="middle",
                                      restore=restore)

        net = tflearn.dropout(net, hyp.dropout.dropout, name="dropout")
        net = tflearn.fully_connected(net,
                                      2,
                                      activation='softmax',
                                      regularizer='L2',
                                      weight_decay=hyp.output.weight_decay,
                                      weights_init='uniform_scaling',
                                      name="output",
                                      restore=restore)
        net = tflearn.regression(net,
                                 optimizer='adam',
                                 learning_rate=hyp.regression.learning_rate,
                                 loss='categorical_crossentropy')
        return net
Project: tflearn (author: tflearn)
def test_recurrent_layers(self):

        X = [[1, 3, 5, 7], [2, 4, 8, 10], [1, 5, 9, 11], [2, 6, 8, 0]]
        Y = [[0., 1.], [1., 0.], [0., 1.], [1., 0.]]

        with tf.Graph().as_default():
            g = tflearn.input_data(shape=[None, 4])
            g = tflearn.embedding(g, input_dim=12, output_dim=4)
            g = tflearn.lstm(g, 6)
            g = tflearn.fully_connected(g, 2, activation='softmax')
            g = tflearn.regression(g, optimizer='sgd', learning_rate=1.)

            m = tflearn.DNN(g)
            m.fit(X, Y, n_epoch=300, snapshot_epoch=False)
            self.assertGreater(m.predict([[5, 9, 11, 1]])[0][1], 0.9)
Project: npi (author: siddk)
def build_program_store(self):
        """
        Build the Program Embedding (M_prog) that takes in a specific Program ID (prg_in), and
        returns the respective Program Embedding.

        Reference: Reed, de Freitas [4]
        """
        embedding = tflearn.embedding(self.prg_in, CONFIG["PROGRAM_NUM"],
                                      CONFIG["PROGRAM_EMBEDDING_SIZE"], name="Program_Embedding")
        return embedding
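
For context, self.prg_in is presumably an integer placeholder carrying the current program ID. A minimal standalone sketch of the same lookup, with hypothetical stand-ins for the CONFIG values:

import tensorflow as tf
import tflearn

PROGRAM_NUM = 10             # hypothetical number of distinct programs
PROGRAM_EMBEDDING_SIZE = 64  # hypothetical embedding dimensionality

prg_in = tflearn.input_data(shape=[None, 1], dtype=tf.int32)  # one program ID per example
m_prog = tflearn.embedding(prg_in, input_dim=PROGRAM_NUM,
                           output_dim=PROGRAM_EMBEDDING_SIZE,
                           name="Program_Embedding")
# m_prog has shape [batch, 1, PROGRAM_EMBEDDING_SIZE]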
Project: rnn-sentiment-analysis (author: kashizui)
def build(embedding_size=(400000, 50), train_embedding=False, hidden_dims=128,
          learning_rate=0.001):
    net = tflearn.input_data([None, 200])
    net = tflearn.embedding(net, input_dim=embedding_size[0],
                            output_dim=embedding_size[1],
                            trainable=train_embedding, name='EmbeddingLayer')
    net = tflearn.lstm(net, hidden_dims)
    net = tflearn.dropout(net, 0.5)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='adam', learning_rate=learning_rate,
                             loss='categorical_crossentropy')
    return net
Project: rnn-sentiment-analysis (author: kashizui)
def generate_embedding():
    with open('stanfordSentimentTreebank/datasetSentences.txt') as f:
        sentences = f.read().splitlines()
    # Word2Vec expects tokenized sentences, i.e. lists of words
    sentences = [line.split() for line in sentences]
    embedding = gensim.models.Word2Vec(sentences)
    # embedding = gensim.models.Word2Vec()  # an empty model, no training
    return embedding
Project: rnn-sentiment-analysis (author: kashizui)
def generate_net(embedding):
    # NOTE: the pretrained `embedding` argument is not used here; the layer
    # below is trained from scratch (see the sketch after this snippet)
    net = tflearn.input_data([None, 200])
    net = tflearn.embedding(net, input_dim=300000, output_dim=128)
    net = tflearn.lstm(net, 128)
    net = tflearn.dropout(net, 0.5)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='adam',
                             loss='categorical_crossentropy')
    return net
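
One way to actually inject pretrained vectors into a named tflearn embedding layer, sketched with a hypothetical pretrained_matrix (this is an assumption, not what the original project does):

import numpy as np
import tflearn

net = tflearn.input_data([None, 200])
net = tflearn.embedding(net, input_dim=300000, output_dim=128,
                        name='EmbeddingLayer')
net = tflearn.lstm(net, 128)
net = tflearn.fully_connected(net, 2, activation='softmax')
net = tflearn.regression(net, optimizer='adam', loss='categorical_crossentropy')
model = tflearn.DNN(net)

# hypothetical [vocab x dim] matrix built from the gensim word vectors
pretrained_matrix = np.zeros((300000, 128), dtype=np.float32)
emb_var = tflearn.get_layer_variables_by_name('EmbeddingLayer')[0]
model.set_weights(emb_var, pretrained_matrix)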
Project: identifiera-sarkasm (author: risnejunior)
def spectacular_bid(self, hyp, pd):
        net = tflearn.input_data([None, pd.max_sequence], dtype=tf.float32)
        net = tflearn.embedding(net,
                                input_dim=pd.vocab_size,
                                output_dim=pd.emb_size,
                                name="embedding")
        net = tflearn.lstm(net,
                           750,
                           dynamic=True,
                           name="lstm_1",
                           return_seq=True,
                           dropout=hyp.lstm.dropout)
        net = tflearn.dropout(net, hyp.dropout.dropout, name="dropout")
        net = tflearn.lstm(net,
                           750,
                           name="lstm_2",
                           return_seq=False)
        net = tflearn.fully_connected(net,
                                      2,
                                      activation='softmax',
                                      name="output",
                                      regularizer='L2',
                                      weight_decay=hyp.output.weight_decay)
        net = tflearn.regression(net,
                                 optimizer='adam',
                                 learning_rate=hyp.regression.learning_rate,
                                 loss='categorical_crossentropy')

        return net
Project: NER-RNN (author: dhwajraj)
def get_input(FILE_NAME):
    word = []
    tag = []

    sentence = []
    sentence_tag = []

    # maximum number of words per sentence
    max_sentence_length = MAX_DOCUMENT_LENGTH  # findMaxLenght(FILE_NAME)
    sentence_length = 0

    print("max sentence size is: " + str(max_sentence_length))

    for line in open(FILE_NAME):
        if line in ['\n', '\r\n']:
            # pad the current sentence (and its tags) up to max_sentence_length
            for _ in range(max_sentence_length - sentence_length):
                tag.append(np.asarray([0, 0, 0, 0, 0]))
                temp = getEmb("~#~")
                word.append(temp)

            sentence.append(word)
            sentence_tag.append(np.asarray(tag))

            sentence_length = 0
            word = []
            tag = []

        else:
            assert(len(line.split()) == 4)
            if sentence_length >= max_sentence_length:
                continue
            sentence_length += 1
            temp = getEmb(line.split()[0])
            temp = np.append(temp, pos(line.split()[1]))      # add POS embedding
            temp = np.append(temp, chunk(line.split()[2]))    # add chunk embedding
            temp = np.append(temp, capital(line.split()[0]))  # add capitalization embedding
            word.append(temp)
            t = line.split()[3]

            # Five classes: 0-None, 1-Person, 2-Location, 3-Organisation, 4-Misc
            if t.endswith('O'):
                tag.append(np.asarray([1, 0, 0, 0, 0]))
            elif t.endswith('PER'):
                tag.append(np.asarray([0, 1, 0, 0, 0]))
            elif t.endswith('LOC'):
                tag.append(np.asarray([0, 0, 1, 0, 0]))
            elif t.endswith('ORG'):
                tag.append(np.asarray([0, 0, 0, 1, 0]))
            elif t.endswith('MISC'):
                tag.append(np.asarray([0, 0, 0, 0, 1]))
            else:
                print("error in input: " + str(t))

    assert(len(sentence) == len(sentence_tag))
    return np.asarray(sentence), sentence_tag
Project: MKGBackEnd (author: ShadowWalker627)
(The get_input function in this project is a verbatim copy of the NER-RNN example above.)