Python tflearn module: dropout() example source code

We extracted the following 24 code examples from open-source Python projects to illustrate how to use tflearn.dropout().
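
Before the project examples, here is a minimal, self-contained sketch of where tflearn.dropout() typically sits in a network definition. The layer sizes are hypothetical, and it assumes tflearn on its TensorFlow 1.x backend:

import tflearn

# tflearn.dropout(incoming, keep_prob): the second argument is the
# probability of KEEPING each unit during training; tflearn switches
# dropout off automatically at inference time.
net = tflearn.input_data(shape=[None, 784])                   # e.g. flattened 28x28 images
net = tflearn.fully_connected(net, 128, activation='relu')
net = tflearn.dropout(net, 0.5)                               # keep 50% of activations
net = tflearn.fully_connected(net, 10, activation='softmax')
net = tflearn.regression(net, optimizer='adam', loss='categorical_crossentropy')
model = tflearn.DNN(net)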

Project: easygen    Author: markriedl
def buildModel(layers, hidden_nodes, maxlen, char_idx, dropout = False):
    g = tflearn.input_data([None, maxlen, len(char_idx)])
    for n in range(layers-1):
        g = tflearn.lstm(g, hidden_nodes, return_seq=True)
        if dropout:
            g = tflearn.dropout(g, 0.5)
    g = tflearn.lstm(g, hidden_nodes)
    if dropout:
        g = tflearn.dropout(g, 0.5)
    g = tflearn.fully_connected(g, len(char_idx), activation='softmax')
    g = tflearn.regression(g, optimizer='adam', loss='categorical_crossentropy', learning_rate=0.001)
    return g

# inputs:
#   data - textfile
# outputs:
#   model - a TFlearn model file
#   dictionary - char_idx pickle
# params:
#   history - max length of sequence to feed into neural net
#   layers - number of hidden layers of the network
#   epochs - how many epochs to run
#   hidden_nodes - how many nodes per hidden layer
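
Combined with buildModel() above, the parameters just listed map onto a training flow like this sketch. The file name and parameter values are assumptions, modeled on the CharacterLSTM_Train snippet later in this collection:

# Hypothetical usage, not the project's exact training script.
from tflearn.data_utils import textfile_to_semi_redundant_sequences

X, Y, char_idx = textfile_to_semi_redundant_sequences('data.txt', seq_maxlen=25, redun_step=3)
g = buildModel(layers=3, hidden_nodes=512, maxlen=25, char_idx=char_idx, dropout=True)
m = tflearn.SequenceGenerator(g, dictionary=char_idx, seq_maxlen=25, clip_gradients=5.0)
m.fit(X, Y, validation_set=0.1, batch_size=128, n_epoch=10, run_id='run_gen')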
Project: identifiera-sarkasm    Author: risnejunior
def convolve_me(self, hyp, pd):
        network = input_data(shape=[None, pd.max_sequence], name='input')
        network = tflearn.embedding(network,
                                    input_dim=pd.vocab_size,
                                    output_dim=pd.emb_size,
                                    name="embedding")
        branch1 = conv_1d(network, 128, 3, padding='valid', activation='relu', regularizer="L2")
        branch2 = conv_1d(network, 128, 4, padding='valid', activation='relu', regularizer="L2")
        branch3 = conv_1d(network, 128, 5, padding='valid', activation='relu', regularizer="L2")
        network = merge([branch1, branch2, branch3], mode='concat', axis=1)
        network = tf.expand_dims(network, 2)
        network = global_max_pool(network)
        network = dropout(network, 0.5)
        network = fully_connected(network, 2, activation='softmax')
        network = regression(network, optimizer='adam', learning_rate=0.001,
                             loss='categorical_crossentropy', name='target')
        return network
Project: rnn-sentiment-analysis    Author: kashizui
def build(embedding_size=(400000, 50), train_embedding=False, hidden_dims=128,
          learning_rate=0.001):
    net = tflearn.input_data([None, 200])
    net = tflearn.embedding(net, input_dim=embedding_size[0],
                            output_dim=embedding_size[1],
                            trainable=train_embedding, name='EmbeddingLayer')
    net = tflearn.lstm(net, hidden_dims, return_seq=True)
    net = tflearn.dropout(net, 0.5)
    net = tflearn.lstm(net, hidden_dims, return_seq=True)
    net = tflearn.dropout(net, 0.5)
    net = tflearn.lstm(net, hidden_dims, return_seq=True)
    net = tflearn.dropout(net, 0.5)
    net = tflearn.lstm(net, hidden_dims)
    net = tflearn.dropout(net, 0.5)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='adam', learning_rate=learning_rate,
                             loss='categorical_crossentropy')
    return net
Project: deep_portfolio    Author: deependersingla
def make_network(look_back, batch_size):
    """
    Declare the layer types and sizes
    """
    # create deep neural network with LSTM and fully connected layers
    net = tfl.input_data(shape=[None, look_back, 1], name='input')
    net = tfl.lstm(net, 32, activation='tanh', weights_init='xavier', name='LSTM1')

    net = tfl.fully_connected(net, 20, activation='relu', name='FC1')
    # net = tfl.dropout(net, 0.5)
    net = tfl.fully_connected(net, 40, activation='relu', name='FC2')
    # net = tfl.dropout(net, 0.5)

    net = tfl.fully_connected(net, 1, activation='linear', name='Linear')
    net = tfl.regression(net, batch_size=batch_size, optimizer='adam', learning_rate=0.005, loss='mean_square',
                         name='target')
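    # Copy the trainable variables into the (legacy) VARIABLES collection,
    # an alias of GLOBAL_VARIABLES, so code that reads that collection sees them.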
    col = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
    for x in col:
        tf.add_to_collection(tf.GraphKeys.VARIABLES, x)
    return net
Project: make-lstm-great-again    Author: eleurent
def build_model(maxlen, char_idx, checkpoint_path):
    g = tflearn.input_data([None, maxlen, len(char_idx)])
    g = tflearn.lstm(g, 512, return_seq=True)
    g = tflearn.dropout(g, 0.5)
    g = tflearn.lstm(g, 512, return_seq=True)
    g = tflearn.dropout(g, 0.5)
    g = tflearn.lstm(g, 512)
    g = tflearn.dropout(g, 0.5)
    g = tflearn.fully_connected(g, len(char_idx), activation='softmax')
    g = tflearn.regression(g, optimizer='adam', loss='categorical_crossentropy',
                           learning_rate=0.001)

    return tflearn.SequenceGenerator(g, dictionary=char_idx,
                                     seq_maxlen=maxlen,
                                     clip_gradients=5.0,
                                     checkpoint_path=checkpoint_path)
Project: identifiera-sarkasm    Author: risnejunior
def big_boy(self, hyp, pd):
        restore = True
        net = tflearn.input_data([None, pd.max_sequence], dtype=tf.float32)
        net = tflearn.embedding(net, input_dim=pd.vocab_size,
                                     output_dim=pd.emb_size,
                                     name="embedding",
                                     restore=restore)

        net = tflearn.lstm(net,
                           512,
                           dropout=hyp.lstm.dropout,
                           weights_init='uniform_scaling',
                           dynamic=True,
                           name="lstm",
                           restore=restore)

        net = tflearn.fully_connected(net,
                                      128,
                                      activation='sigmoid',
                                      regularizer='L2',
                                      weight_decay=hyp.middle.weight_decay,
                                      weights_init='uniform_scaling',
                                      name="middle",
                                      restore=restore)

        net = tflearn.dropout(net, hyp.dropout.dropout, name="dropout")
        net = tflearn.fully_connected(net,
                                      2,
                                      activation='softmax',
                                      regularizer='L2',
                                      weight_decay=hyp.output.weight_decay,
                                      weights_init='uniform_scaling',
                                      name="output",
                                      restore=restore)
        net = tflearn.regression(net,
                                 optimizer='adam',
                                 learning_rate=hyp.regression.learning_rate,
                                 loss='categorical_crossentropy')
        return net
Project: identifiera-sarkasm    Author: risnejunior
def bidirectional(self, hyp, pd):
            restore = True
            net = tflearn.input_data([None, pd.max_sequence], dtype=tf.float32)
            net = tflearn.embedding(net, input_dim=pd.vocab_size,
                                         output_dim=pd.emb_size,
                                         name="embedding",
                                         restore=restore)

            net = bidirectional_rnn(net,
                                    BasicLSTMCell(256),
                                    BasicLSTMCell(256),
                                    dynamic=True)

            net = tflearn.fully_connected(net,
                                          128,
                                          activation='sigmoid',
                                          regularizer='L2',
                                          weight_decay=hyp.middle.weight_decay,
                                          name="middle",
                                          restore=restore)

            net = tflearn.dropout(net, hyp.dropout.dropout, name="dropout")
            net = tflearn.fully_connected(net,
                                          2,
                                          activation='softmax',
                                          regularizer='L2',
                                          weight_decay=hyp.output.weight_decay,
                                          name="output",
                                          restore=restore)
            net = tflearn.regression(net,
                                     optimizer='adam',
                                     learning_rate=hyp.regression.learning_rate,
                                     loss='categorical_crossentropy')
            return net
Project: skill-voice-recognition    Author: TREE-Edu
def handle_speaker_rec_test_intent(self, message):
        speakers = data.get_speakers()
        number_classes=len(speakers)
        #print("speakers",speakers)

        #batch=data.wave_batch_generator(batch_size=1000, source=data.Source.DIGIT_WAVES, target=data.Target.speaker)
        #X,Y=next(batch)


        # Classification
        #tflearn.init_graph(num_cores=8, gpu_memory_fraction=0.5)

        net = tflearn.input_data(shape=[None, 8192]) #Two wave chunks
        net = tflearn.fully_connected(net, 64)
        net = tflearn.dropout(net, 0.5)
        net = tflearn.fully_connected(net, number_classes, activation='softmax')
        net = tflearn.regression(net, optimizer='adam', loss='categorical_crossentropy')

        model = tflearn.DNN(net)
        #model.fit(X, Y, n_epoch=100, show_metric=True, snapshot_step=100)

        CWD_PATH = os.path.dirname(__file__)
        path_to_model = os.path.join(CWD_PATH, 'model', 'model.tfl')
        model.load(path_to_model) 

        demo_file = "8_Vicki_260.wav"
        #demo_file = "8_Bruce_260.wav"
        demo=data.load_wav_file(data.path + demo_file)
        result=model.predict([demo])
        result=data.one_hot_to_item(result,speakers)
        if result == "Vicki":
            self.speak("I am confident I'm speaking to %s"%(result)) # ~ 97% correct
        else:
            self.speak("I'm sorry I don't recognize your voice")
Project: tflearn    Author: tflearn
def deep_model(self, wide_inputs, n_inputs, n_nodes=[100, 50], use_dropout=False):
        '''
        Model - deep, i.e. two-layer fully connected network model
        '''
        cc_input_var = {}
        cc_embed_var = {}
        flat_vars = []
        if self.verbose:
            print ("--> deep model: %s categories, %d continuous" % (len(self.categorical_columns), n_inputs))
        for cc, cc_size in self.categorical_columns.items():
            cc_input_var[cc] = tflearn.input_data(shape=[None, 1], name="%s_in" % cc, dtype=tf.int32)
            # embedding layers only work on CPU!  No GPU implementation in tensorflow, yet!
            cc_embed_var[cc] = tflearn.layers.embedding_ops.embedding(cc_input_var[cc], cc_size, 8, name="deep_%s_embed" % cc)
            if self.verbose:
                print ("    %s_embed = %s" % (cc, cc_embed_var[cc]))
            flat_vars.append(tf.squeeze(cc_embed_var[cc], squeeze_dims=[1], name="%s_squeeze" % cc))

        network = tf.concat([wide_inputs] + flat_vars, 1, name="deep_concat")
        for k in range(len(n_nodes)):
            network = tflearn.fully_connected(network, n_nodes[k], activation="relu", name="deep_fc%d" % (k+1))
            if use_dropout:
                network = tflearn.dropout(network, 0.5, name="deep_dropout%d" % (k+1))
        if self.verbose:
            print ("Deep model network before output %s" % network)
        network = tflearn.fully_connected(network, 1, activation="linear", name="deep_fc_output", bias=False)
        network = tf.reshape(network, [-1, 1])  # so that accuracy is binary_accuracy
        if self.verbose:
            print ("Deep model network %s" % network)
        return network
Project: tflearn    Author: tflearn
def make_core_network(network):
        network = tflearn.reshape(network, [-1, 28, 28, 1], name="reshape")
        network = conv_2d(network, 32, 3, activation='relu', regularizer="L2")
        network = max_pool_2d(network, 2)
        network = local_response_normalization(network)
        network = conv_2d(network, 64, 3, activation='relu', regularizer="L2")
        network = max_pool_2d(network, 2)
        network = local_response_normalization(network)
        network = fully_connected(network, 128, activation='tanh')
        network = dropout(network, 0.8)
        network = fully_connected(network, 256, activation='tanh')
        network = dropout(network, 0.8)
        network = fully_connected(network, 10, activation='softmax')
        return network
Project: tflearn    Author: tflearn
def make_core_network(network):
        dense1 = tflearn.fully_connected(network, 64, activation='tanh',
                                         regularizer='L2', weight_decay=0.001, name="dense1")
        dropout1 = tflearn.dropout(dense1, 0.8)
        dense2 = tflearn.fully_connected(dropout1, 64, activation='tanh',
                                         regularizer='L2', weight_decay=0.001, name="dense2")
        dropout2 = tflearn.dropout(dense2, 0.8)
        softmax = tflearn.fully_connected(dropout2, 10, activation='softmax', name="softmax")
        return softmax
Project: tflearn    Author: tflearn
def vgg16(input, num_class):

    x = tflearn.conv_2d(input, 64, 3, activation='relu', scope='conv1_1')
    x = tflearn.conv_2d(x, 64, 3, activation='relu', scope='conv1_2')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool1')

    x = tflearn.conv_2d(x, 128, 3, activation='relu', scope='conv2_1')
    x = tflearn.conv_2d(x, 128, 3, activation='relu', scope='conv2_2')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool2')

    x = tflearn.conv_2d(x, 256, 3, activation='relu', scope='conv3_1')
    x = tflearn.conv_2d(x, 256, 3, activation='relu', scope='conv3_2')
    x = tflearn.conv_2d(x, 256, 3, activation='relu', scope='conv3_3')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool3')

    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv4_1')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv4_2')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv4_3')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool4')

    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_1')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_2')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_3')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool5')

    x = tflearn.fully_connected(x, 4096, activation='relu', scope='fc6')
    x = tflearn.dropout(x, 0.5, name='dropout1')

    x = tflearn.fully_connected(x, 4096, activation='relu', scope='fc7')
    x = tflearn.dropout(x, 0.5, name='dropout2')

    x = tflearn.fully_connected(x, num_class, activation='softmax', scope='fc8',
                                restore=False)

    return x
Project: tflearn    Author: tflearn
def test_sequencegenerator(self):

        with tf.Graph().as_default():
            text = "123456789101234567891012345678910123456789101234567891012345678910"
            maxlen = 5

            X, Y, char_idx = \
                tflearn.data_utils.string_to_semi_redundant_sequences(text, seq_maxlen=maxlen, redun_step=3)

            g = tflearn.input_data(shape=[None, maxlen, len(char_idx)])
            g = tflearn.lstm(g, 32)
            g = tflearn.dropout(g, 0.5)
            g = tflearn.fully_connected(g, len(char_idx), activation='softmax')
            g = tflearn.regression(g, optimizer='adam', loss='categorical_crossentropy',
                                   learning_rate=0.1)

            m = tflearn.SequenceGenerator(g, dictionary=char_idx,
                                          seq_maxlen=maxlen,
                                          clip_gradients=5.0)
            m.fit(X, Y, validation_set=0.1, n_epoch=100, snapshot_epoch=False)
            res = m.generate(10, temperature=.5, seq_seed="12345")
            #self.assertEqual(res, "123456789101234", "SequenceGenerator test failed! Generated sequence: " + res + " expected '123456789101234'")

            # Testing save method
            m.save("test_seqgen.tflearn")
            self.assertTrue(os.path.exists("test_seqgen.tflearn.index"))

            # Testing load method
            m.load("test_seqgen.tflearn")
            res = m.generate(10, temperature=.5, seq_seed="12345")
            # TODO: Fix test
            #self.assertEqual(res, "123456789101234", "SequenceGenerator test failed after loading model! Generated sequence: " + res + " expected '123456789101234'")
Project: easygen    Author: markriedl
def CharacterLSTM_Run(seed, dictionary, model, output, steps = 600, layers = 3, hidden_nodes = 512, history = 25, temperature = 0.5, dropout = False):
    char_idx_file = dictionary
    maxlen = history

    char_idx = None
    if os.path.isfile(char_idx_file):
        print('Loading previous char_idx')
        char_idx = pickle.load(open(char_idx_file, 'rb'))

    tf.reset_default_graph()
    g = buildModel(layers, hidden_nodes, maxlen, char_idx, dropout)
    '''
    g = tflearn.input_data([None, maxlen, len(char_idx)])
    for n in range(layers-1):
        g = tflearn.lstm(g, hidden_nodes, return_seq=True)
        if dropout:
            g = tflearn.dropout(g, 0.5)
    g = tflearn.lstm(g, hidden_nodes)
    if dropout:
        g = tflearn.dropout(g, 0.5)
    g = tflearn.fully_connected(g, len(char_idx), activation='softmax')
    g = tflearn.regression(g, optimizer='adam', loss='categorical_crossentropy', learning_rate=0.001)
    '''
    m = tflearn.SequenceGenerator(g, dictionary=char_idx, seq_maxlen=maxlen, clip_gradients=5.0) #, checkpoint_path='model_history_gen')

    m.load(model)

    #seed = random_sequence_from_textfile(data, maxlen)

    print('seed='+seed)
    print('len=' + str(len(seed)))
    result = m.generate(steps, temperature=temperature, seq_seed=seed[:history])
    print (result)
    return result
Project: rnn-sentiment-analysis    Author: kashizui
def build(embedding_size=(400000, 50), train_embedding=False, hidden_dims=128,
          learning_rate=0.001):
    net = tflearn.input_data([None, 200])
    net = tflearn.embedding(net, input_dim=embedding_size[0],
                            output_dim=embedding_size[1],
                            trainable=train_embedding, name='EmbeddingLayer')
    net = tflearn.lstm(net, hidden_dims)
    net = tflearn.dropout(net, 0.5)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='adam', learning_rate=learning_rate,
                             loss='categorical_crossentropy')
    return net
Project: rnn-sentiment-analysis    Author: kashizui
def generate_net(embedding):
    net = tflearn.input_data([None, 200])
    net = tflearn.embedding(net, input_dim=300000, output_dim=128)
    net = tflearn.lstm(net, 128)
    net = tflearn.dropout(net, 0.5)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='adam',
                             loss='categorical_crossentropy')
    return net
Project: TensorFlow    Author: DiamonJoy
def vgg16(input, num_class):

    x = tflearn.conv_2d(input, 64, 3, activation='relu', scope='conv1_1')
    x = tflearn.conv_2d(x, 64, 3, activation='relu', scope='conv1_2')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool1')

    x = tflearn.conv_2d(x, 128, 3, activation='relu', scope='conv2_1')
    x = tflearn.conv_2d(x, 128, 3, activation='relu', scope='conv2_2')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool2')

    x = tflearn.conv_2d(x, 256, 3, activation='relu', scope='conv3_1')
    x = tflearn.conv_2d(x, 256, 3, activation='relu', scope='conv3_2')
    x = tflearn.conv_2d(x, 256, 3, activation='relu', scope='conv3_3')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool3')

    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv4_1')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv4_2')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv4_3')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool4')

    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_1')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_2')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_3')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool5')

    x = tflearn.fully_connected(x, 4096, activation='relu', scope='fc6')
    x = tflearn.dropout(x, 0.5, name='dropout1')

    x = tflearn.fully_connected(x, 4096, activation='relu', scope='fc7')
    x = tflearn.dropout(x, 0.5, name='dropout2')

    x = tflearn.fully_connected(x, num_class, activation='softmax', scope='fc8',
                                restore=False)

    return x
Project: bot    Author: acrosa
def initialize_model(self):
      char_idx_file = 'char_idx.pickle'
      maxlen = 25

      char_idx = None
      if os.path.isfile(char_idx_file):
        print('Loading previous char_idx')
        char_idx = pickle.load(open(char_idx_file, 'rb'))

      X, Y, char_idx = textfile_to_semi_redundant_sequences(path, seq_maxlen=maxlen, redun_step=3, pre_defined_char_idx=char_idx)

      g = tflearn.input_data([None, maxlen, len(char_idx)])
      g = tflearn.lstm(g, 512, return_seq=True)
      g = tflearn.dropout(g, 0.5)
      g = tflearn.lstm(g, 512, return_seq=True)
      g = tflearn.dropout(g, 0.5)
      g = tflearn.lstm(g, 512)
      g = tflearn.dropout(g, 0.5)
      g = tflearn.fully_connected(g, len(char_idx), activation='softmax')
      g = tflearn.regression(g, optimizer='adam', loss='categorical_crossentropy',
                             learning_rate=0.01)

      m = tflearn.SequenceGenerator(g, dictionary=char_idx,
                              seq_maxlen=maxlen,
                              clip_gradients=5.0,
                              checkpoint_path='model_tweets')
      # Load the model
      m.load("model.tfl")
      self.__text_model = m
Project: tflearn_wide_and_deep    Author: ichuang
def deep_model(self, wide_inputs, n_inputs, n_nodes=[100, 50], use_dropout=False):
        '''
        Model - deep, i.e. two-layer fully connected network model
        '''
        cc_input_var = {}
        cc_embed_var = {}
        flat_vars = []
        if self.verbose:
            print ("--> deep model: %s categories, %d continuous" % (len(self.categorical_columns), n_inputs))
        for cc, cc_size in self.categorical_columns.items():
            cc_input_var[cc] = tflearn.input_data(shape=[None, 1], name="%s_in" % cc, dtype=tf.int32)
            # embedding layers only work on CPU!  No GPU implementation in tensorflow, yet!
            cc_embed_var[cc] = tflearn.layers.embedding_ops.embedding(cc_input_var[cc], cc_size, 8, name="deep_%s_embed" % cc)
            if self.verbose:
                print ("    %s_embed = %s" % (cc, cc_embed_var[cc]))
            flat_vars.append(tf.squeeze(cc_embed_var[cc], squeeze_dims=[1], name="%s_squeeze" % cc))

        network = tf.concat(1, [wide_inputs] + flat_vars, name="deep_concat")
        for k in range(len(n_nodes)):
            network = tflearn.fully_connected(network, n_nodes[k], activation="relu", name="deep_fc%d" % (k+1))
            if use_dropout:
                network = tflearn.dropout(network, 0.5, name="deep_dropout%d" % (k+1))
        if self.verbose:
            print ("Deep model network before output %s" % network)
        network = tflearn.fully_connected(network, 1, activation="linear", name="deep_fc_output", bias=False)
        network = tf.reshape(network, [-1, 1])  # so that accuracy is binary_accuracy
        if self.verbose:
            print ("Deep model network %s" % network)
        return network
Project: MSTAR_tensorflow    Author: hamza-latif
def example_net(x):
    network = tflearn.conv_2d(x, 32, 3, activation='relu')
    network = tflearn.max_pool_2d(network, 2)
    network = tflearn.conv_2d(network, 64, 3, activation='relu')
    network = tflearn.conv_2d(network, 64, 3, activation='relu')
    network = tflearn.max_pool_2d(network, 2)
    network = tflearn.fully_connected(network, 512, activation='relu')
    network = tflearn.dropout(network, 0.5)
    network = tflearn.fully_connected(network, 3, activation='softmax')

    return network
Project: identifiera-sarkasm    Author: risnejunior
def spectacular_bid(self, hyp, pd):
        net = tflearn.input_data(
            [None, pd.max_sequence]
            ,dtype=tf.float32
        )
        net = tflearn.embedding(
            net, 
            input_dim=pd.vocab_size,
            output_dim=pd.emb_size,
            name="embedding"
        )
        net = tflearn.lstm(
            net,
            750,
            dynamic=True,
            name="lstm_1",                 
            return_seq=True,
            dropout=hyp.lstm.dropout
        )
        net = tflearn.dropout(net, hyp.dropout.dropout, name="dropout")
        net = tflearn.lstm(
            net,
            750,
            name="lstm_2",
            return_seq=False
        )   
        net = tflearn.fully_connected(
            net,
            2,
            activation='softmax',
            name="output",
            regularizer='L2',
            weight_decay=hyp.output.weight_decay
        )
        net = tflearn.regression(
            net,
            optimizer='adam',
            learning_rate=hyp.regression.learning_rate,
            loss='categorical_crossentropy'
        )

        return net
Project: tflearn    Author: tflearn
def test_core_layers(self):

        X = [[0., 0.], [0., 1.], [1., 0.], [1., 1.]]
        Y_nand = [[1.], [1.], [1.], [0.]]
        Y_or = [[0.], [1.], [1.], [1.]]

        # Graph definition
        with tf.Graph().as_default():
            # Building a network with 2 optimizers
            g = tflearn.input_data(shape=[None, 2])

            # Nand operator definition
            g_nand = tflearn.fully_connected(g, 32, activation='linear')
            g_nand = tflearn.fully_connected(g_nand, 32, activation='linear')
            g_nand = tflearn.fully_connected(g_nand, 1, activation='sigmoid')
            g_nand = tflearn.regression(g_nand, optimizer='sgd',
                                        learning_rate=2.,
                                        loss='binary_crossentropy')
            # Or operator definition
            g_or = tflearn.fully_connected(g, 32, activation='linear')
            g_or = tflearn.fully_connected(g_or, 32, activation='linear')
            g_or = tflearn.fully_connected(g_or, 1, activation='sigmoid')
            g_or = tflearn.regression(g_or, optimizer='sgd',
                                      learning_rate=2.,
                                      loss='binary_crossentropy')
            # XOR merging Nand and Or operators
            g_xor = tflearn.merge([g_nand, g_or], mode='elemwise_mul')

            # Training
            m = tflearn.DNN(g_xor)
            m.fit(X, [Y_nand, Y_or], n_epoch=400, snapshot_epoch=False)

            # Testing
            self.assertLess(m.predict([[0., 0.]])[0][0], 0.01)
            self.assertGreater(m.predict([[0., 1.]])[0][0], 0.9)
            self.assertGreater(m.predict([[1., 0.]])[0][0], 0.9)
            self.assertLess(m.predict([[1., 1.]])[0][0], 0.01)

        # Bulk Tests
        with tf.Graph().as_default():
            net = tflearn.input_data(shape=[None, 2])
            net = tflearn.flatten(net)
            net = tflearn.reshape(net, new_shape=[-1])
            net = tflearn.activation(net, 'relu')
            net = tflearn.dropout(net, 0.5)
            net = tflearn.single_unit(net)
Project: easygen    Author: markriedl
def CharacterLSTM_Train(data, model, dictionary, history = 25, layers = 3, epochs = 10, hidden_nodes = 512, dropout = False):
    char_idx_file = dictionary
    maxlen = history

    char_idx = None
    '''
    if os.path.isfile(char_idx_file):
        print('Loading previous char_idx')
        char_idx = pickle.load(open(char_idx_file, 'rb'))
    print("---------------")
    print(char_idx)
    print(len(char_idx))
    '''

    X, Y, char_idx = textfile_to_semi_redundant_sequences(data, seq_maxlen=maxlen, redun_step=3)

    pickle.dump(char_idx, open(dictionary,'wb'))

    tf.reset_default_graph()
    print("layers " + str(layers) + " hidden " + str(hidden_nodes))
    '''
    g = tflearn.input_data([None, maxlen, len(char_idx)])
    for n in range(layers-1):
        g = tflearn.lstm(g, hidden_nodes, return_seq=True)
        if dropout:
            g = tflearn.dropout(g, 0.5)
    g = tflearn.lstm(g, hidden_nodes)
    if dropout:
        g = tflearn.dropout(g, 0.5)
    g = tflearn.fully_connected(g, len(char_idx), activation='softmax')
    g = tflearn.regression(g, optimizer='adam', loss='categorical_crossentropy', learning_rate=0.001)
    '''
    g = buildModel(layers, hidden_nodes, maxlen, char_idx, dropout)
    m = tflearn.SequenceGenerator(g, dictionary=char_idx, seq_maxlen=maxlen, clip_gradients=5.0) #, checkpoint_path='model_history_gen')

    #if model is not None:
    #   m.load(model)

    #for i in range(epochs):
    #seed = random_sequence_from_textfile(data, maxlen)
    m.fit(X, Y, validation_set=0.1, batch_size=128, n_epoch=epochs, run_id='run_gen')
    print("Saving...")
    m.save(model)
    #print("-- TESTING...")
    #print("-- Test with temperature of 1.0 --")
    #print(m.generate(600, temperature=1.0, seq_seed=seed))
    #print("-- Test with temperature of 0.5 --")
    #print(m.generate(600, temperature=0.5, seq_seed=seed))


# inputs:
#   data - textfile
#   in_model - a TFLearn model file
# outputs:
#   out_model - a TFlearn model file
# params:
#   history - max length of sequence to feed into neural net
#   layers - number of hidden layers of the network
#   epochs - how many epochs to run
#   hidden_nodes - how many nodes per hidden layer
Project: DiagnosisPredictor    Author: Naresh1318
def patient_output(vector_rep_patient):
    # The vector representation for the patient sequence
    vector_rep_patient = convert_seq_to_vec(vector_rep_patient)

    # load the sc model
    sc = joblib.load('../Predictor_Tfidf/Saved_Models/Fully_Connected_n_epochs_10/standard.pkl')
    patient_seq = sc.transform(vector_rep_patient.toarray())
    generate_icd9_lookup()  # generate the lookup for each diagnosis

    for c, d in enumerate(uniq_diag):

        # Run each iteration in a graph
        with tf.Graph().as_default():
            # Model
            input_layer = tflearn.input_data(shape=[None, 1391], name='input')
            dense1 = tflearn.fully_connected(input_layer, 128, activation='linear', name='dense1')
            dropout1 = tflearn.dropout(dense1, 0.8)
            dense2 = tflearn.fully_connected(dropout1, 128, activation='linear', name='dense2')
            dropout2 = tflearn.dropout(dense2, 0.8)
            output = tflearn.fully_connected(dropout2, 2, activation='softmax', name='output')
            regression = tflearn.regression(output, optimizer='adam', loss='categorical_crossentropy',
                                            learning_rate=.001)

            # Define model with checkpoint (autosave)
            model = tflearn.DNN(regression, tensorboard_verbose=3)

            # load the previously trained model
            model.load('../Predictor_Tfidf/Saved_Models/Fully_Connected_n_epochs_{0}/'
                       'dense_fully_connected_dropout_5645_{1}.tfl'
                       .format(n_epoch, d))

            # Standardize the values and predict the output
            vector_rep_patient_sc = np.reshape(patient_seq, (1, 1391))
            # Find the probability of outputs
            Prediction_for_patient_prob[d] = np.array(model.predict(vector_rep_patient_sc))[:, 1]

            Prediction_for_patient[d] = np.where(Prediction_for_patient_prob[d] > 0.5, 1., 0.)

            print('Completed : {0}/{1}'.format(c + 1, len(uniq_diag)))


# Print the predictions for the patient's input