Python tflearn module: fully_connected() code examples

The following code examples, extracted from open-source Python projects, illustrate how to use tflearn.fully_connected().

Project: easygen    Author: markriedl    | project source | file source
def buildModel(layers, hidden_nodes, maxlen, char_idx, dropout = False):
    g = tflearn.input_data([None, maxlen, len(char_idx)])
    for n in range(layers-1):
        g = tflearn.lstm(g, hidden_nodes, return_seq=True)
        if dropout:
            g = tflearn.dropout(g, 0.5)
    g = tflearn.lstm(g, hidden_nodes)
    if dropout:
        g = tflearn.dropout(g, 0.5)
    g = tflearn.fully_connected(g, len(char_idx), activation='softmax')
    g = tflearn.regression(g, optimizer='adam', loss='categorical_crossentropy', learning_rate=0.001)
    return g

# inputs:
#   data - textfile
# outputs:
#   model - a TFlearn model file
#   dictionary - char_idx pickle
# params:
#   history - max length of sequence to feed into neural net
#   layers - number of hidden layers of the network
#   epochs - how many epochs to run
#   hidden_nodes - how many nodes per hidden layer
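A hypothetical driver wiring buildModel to the inputs/outputs/params documented above (all file names are placeholders, not from the project):

import pickle
import tflearn
from tflearn.data_utils import textfile_to_semi_redundant_sequences

history = 25
X, Y, char_idx = textfile_to_semi_redundant_sequences('data.txt', seq_maxlen=history)
pickle.dump(char_idx, open('char_idx.pickle', 'wb'))   # the "dictionary" output
g = buildModel(layers=3, hidden_nodes=512, maxlen=history, char_idx=char_idx, dropout=True)
m = tflearn.SequenceGenerator(g, dictionary=char_idx, seq_maxlen=history, clip_gradients=5.0)
m.fit(X, Y, n_epoch=1, batch_size=128)                 # "epochs" param
m.save('model.tfl')                                    # the "model" output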
Project: identifiera-sarkasm    Author: risnejunior    | project source | file source
def convolve_me(self, hyp, pd):
        # assumed module-level imports (not shown in this excerpt):
        #   import tensorflow as tf
        #   import tflearn
        #   from tflearn.layers.core import input_data, dropout, fully_connected
        #   from tflearn.layers.conv import conv_1d, global_max_pool
        #   from tflearn.layers.merge_ops import merge
        #   from tflearn.layers.estimator import regression
        network = input_data(shape=[None, pd.max_sequence], name='input')
        network = tflearn.embedding(network,
                                    input_dim=pd.vocab_size,
                                    output_dim=pd.emb_size,
                                    name="embedding")
        branch1 = conv_1d(network, 128, 3, padding='valid', activation='relu', regularizer="L2")
        branch2 = conv_1d(network, 128, 4, padding='valid', activation='relu', regularizer="L2")
        branch3 = conv_1d(network, 128, 5, padding='valid', activation='relu', regularizer="L2")
        network = merge([branch1, branch2, branch3], mode='concat', axis=1)
        network = tf.expand_dims(network, 2)
        network = global_max_pool(network)
        network = dropout(network, 0.5)
        network = fully_connected(network, 2, activation='softmax')
        network = regression(network, optimizer='adam', learning_rate=0.001,
                             loss='categorical_crossentropy', name='target')
        return network
Project: rnn-sentiment-analysis    Author: kashizui    | project source | file source
def build(embedding_size=(400000, 50), train_embedding=False, hidden_dims=128,
          learning_rate=0.001):
    net = tflearn.input_data([None, 200])
    net = tflearn.embedding(net, input_dim=embedding_size[0],
                            output_dim=embedding_size[1],
                            trainable=train_embedding, name='EmbeddingLayer')
    net = tflearn.lstm(net, hidden_dims, return_seq=True)
    net = tflearn.dropout(net, 0.5)
    net = tflearn.lstm(net, hidden_dims, return_seq=True)
    net = tflearn.dropout(net, 0.5)
    net = tflearn.lstm(net, hidden_dims, return_seq=True)
    net = tflearn.dropout(net, 0.5)
    net = tflearn.lstm(net, hidden_dims)
    net = tflearn.dropout(net, 0.5)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='adam', learning_rate=learning_rate,
                             loss='categorical_crossentropy')
    return net
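A hypothetical training run, following tflearn's own IMDB example; sequences are padded to the 200 steps the input layer expects:

import tflearn
from tflearn.data_utils import to_categorical, pad_sequences
from tflearn.datasets import imdb

train, test, _ = imdb.load_data(path='imdb.pkl', n_words=10000, valid_portion=0.1)
trainX, trainY = train
trainX = pad_sequences(trainX, maxlen=200)
trainY = to_categorical(trainY, nb_classes=2)
model = tflearn.DNN(build(), tensorboard_verbose=0)
model.fit(trainX, trainY, validation_set=0.1, show_metric=True, batch_size=32)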
Project: pygta5    Author: Sentdex    | project source | file source
def sentnet_LSTM_gray(width, height, frame_count, lr, output=9):
    network = input_data(shape=[None, width, height], name='input')
    #network = tflearn.input_data(shape=[None, 28, 28], name='input')
    network = tflearn.lstm(network, 128, return_seq=True)
    network = tflearn.lstm(network, 128)
    network = tflearn.fully_connected(network, 9, activation='softmax')
    network = tflearn.regression(network, optimizer='adam',
                                 loss='categorical_crossentropy', name="output1")

    model = tflearn.DNN(network, checkpoint_path='model_lstm',
                        max_checkpoints=1, tensorboard_verbose=0, tensorboard_dir='log')

    return model
Project: identifiera-sarkasm    Author: risnejunior    | project source | file source
def basic_pony(self, hyp, pd):
        net = tflearn.input_data([None, pd.max_sequence], dtype=tf.float32)
        net = tflearn.embedding(net, input_dim=pd.vocab_size,
                                     output_dim=pd.emb_size,
                                     name="embedding")
        net = tflearn.lstm(net,
                           32,
                           dynamic=False,
                           name="lstm")
        net = tflearn.fully_connected(net,
                                      2,
                                      activation='softmax',
                                      name="output",
                                      restore=True)
        net = tflearn.regression(net,
                                 optimizer='adam',
                                 learning_rate=hyp.regression.learning_rate,
                                 loss='categorical_crossentropy')
        return net
Project: identifiera-sarkasm    Author: risnejunior    | project source | file source
def little_pony(self, hyp, pd):
        net = tflearn.input_data([None, pd.max_sequence], dtype=tf.float32)
        net = tflearn.embedding(net, input_dim=pd.vocab_size,
                                     output_dim=pd.emb_size,
                                     name="embedding")
        net = tflearn.lstm(net,
                           256,
                           dynamic=True,
                           name="lstm")
        net = tflearn.fully_connected(net,
                                      2,
                                      activation='softmax',
                                      name="output",
                                      restore=True)
        net = tflearn.regression(net,
                                 optimizer='adam',
                                 learning_rate=0.01,
                                 loss='categorical_crossentropy')
        return net
Project: identifiera-sarkasm    Author: risnejunior    | project source | file source
def little_gru(self, hyp, pd):
        net = tflearn.input_data([None, pd.max_sequence], dtype=tf.float32)
        net = tflearn.embedding(net, input_dim=pd.vocab_size,
                                     output_dim=pd.emb_size,
                                     name="embedding")
        net = tflearn.gru(net,
                           256,
                           dynamic=True,
                           name="gru")
        net = tflearn.fully_connected(net,
                                      2,
                                      activation='softmax',
                                      name="output",
                                      restore=True)
        net = tflearn.regression(net,
                                 optimizer='adam',
                                 learning_rate=hyp.regression.learning_rate,
                                 loss='categorical_crossentropy')
        return net
Project: tflearn    Author: tflearn    | project source | file source
def __init__(self):
        inputs = tflearn.input_data(shape=[None, 784], name="input")

        with tf.variable_scope("scope1") as scope:
            net_conv = Model1.make_core_network(inputs) # shape (?, 10)
        with tf.variable_scope("scope2") as scope:
            net_dnn = Model2.make_core_network(inputs)  # shape (?, 10)

        network = tf.concat([net_conv, net_dnn], 1, name="concat")  # shape (?, 20)
        network = tflearn.fully_connected(network, 10, activation="softmax")
        network = regression(network, optimizer='adam', learning_rate=0.01,
                             loss='categorical_crossentropy', name='target')

        self.model = tflearn.DNN(network, tensorboard_verbose=0)
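A hypothetical training run for the two-branch model above (the enclosing class name is not shown in the excerpt, so CombinedModel stands in for it):

import tflearn.datasets.mnist as mnist

X, Y, testX, testY = mnist.load_data(one_hot=True)
m = CombinedModel()  # hypothetical name for the class owning this __init__
m.model.fit(X, Y, n_epoch=1, validation_set=(testX, testY), show_metric=True)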
Project: tflearn    Author: tflearn    | project source | file source
def build_simple_model(self):
        """Build a simple model for test
        Returns:
            DNN, [ (input layer name, input placeholder, input data) ], Target data
        """
        inputPlaceholder1, inputPlaceholder2 = \
            tf.placeholder(tf.float32, (1, 1), name = "input1"), tf.placeholder(tf.float32, (1, 1), name = "input2")
        input1 = tflearn.input_data(placeholder = inputPlaceholder1)
        input2 = tflearn.input_data(placeholder = inputPlaceholder2)
        network = tflearn.merge([ input1, input2 ], "sum")
        network = tflearn.reshape(network, (1, 1))
        network = tflearn.fully_connected(network, 1)
        network = tflearn.regression(network)
        return (
            tflearn.DNN(network),
            [ ("input1:0", inputPlaceholder1, self.INPUT_DATA_1), ("input2:0", inputPlaceholder2, self.INPUT_DATA_2) ],
            self.TARGET,
        )
Project: deep_portfolio    Author: deependersingla    | project source | file source
def make_network(look_back, batch_size):
    """
    Declare the layer types and sizes
    """
    # create deep neural network with LSTM and fully connected layers
    net = tfl.input_data(shape=[None, look_back, 1], name='input')
    net = tfl.lstm(net, 32, activation='tanh', weights_init='xavier', name='LSTM1')

    net = tfl.fully_connected(net, 20, activation='relu', name='FC1')
    # net = tfl.dropout(net, 0.5)
    net = tfl.fully_connected(net, 40, activation='relu', name='FC2')
    # net = tfl.dropout(net, 0.5)

    net = tfl.fully_connected(net, 1, activation='linear', name='Linear')
    net = tfl.regression(net, batch_size=batch_size, optimizer='adam', learning_rate=0.005, loss='mean_square',
                         name='target')
    col = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
    for x in col:
        tf.add_to_collection(tf.GraphKeys.VARIABLES, x)
    return net
Project: deep_portfolio    Author: deependersingla    | project source | file source
def create_critic_network(self):
        inputs = tflearn.input_data(shape=[None, self.s_dim])
        action = tflearn.input_data(shape=[None, self.a_dim])
        net = tflearn.fully_connected(inputs, 400, activation='relu')

        # Add the action tensor in the 2nd hidden layer
        # Use two temp layers to get the corresponding weights and biases
        t1 = tflearn.fully_connected(net, 300)
        t2 = tflearn.fully_connected(action, 300)

        net = tflearn.activation(tf.matmul(net, t1.W) + tf.matmul(action, t2.W) + t2.b, activation='relu')

        # linear layer connected to 1 output representing Q(s,a) 
        # Weights are init to Uniform[-3e-3, 3e-3]
        w_init = tflearn.initializations.uniform(minval=-0.003, maxval=0.003)
        out = tflearn.fully_connected(net, 1, weights_init=w_init)
        return inputs, action, out
Project: deep_portfolio    Author: deependersingla    | project source | file source
def create_critic_network(self):
        inputs = tflearn.input_data(shape=[None, self.s_dim])
        action = tflearn.input_data(shape=[None, self.a_dim])
        net = tflearn.fully_connected(inputs, 400)
        net = tflearn.layers.normalization.batch_normalization(net)
        net = tflearn.activations.relu(net)

        # Add the action tensor in the 2nd hidden layer
        # Use two temp layers to get the corresponding weights and biases
        t1 = tflearn.fully_connected(net, 300)
        t2 = tflearn.fully_connected(action, 300)

        net = tflearn.activation(
            tf.matmul(net, t1.W) + tf.matmul(action, t2.W) + t2.b, activation='relu')

        # linear layer connected to 1 output representing Q(s,a)
        # Weights are init to Uniform[-3e-3, 3e-3]
        w_init = tflearn.initializations.uniform(minval=-0.003, maxval=0.003)
        out = tflearn.fully_connected(net, 1, weights_init=w_init)
        return inputs, action, out
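In DDPG, a critic like the one above is differentiated with respect to its action input to drive the actor update. A minimal sketch of that standard pattern (not code from the project; `agent` is a hypothetical instance exposing create_critic_network):

import tensorflow as tf

inputs, action, out = agent.create_critic_network()
# Gradient of Q(s, a) w.r.t. the action, consumed by the actor's optimizer
action_grads = tf.gradients(out, action)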
Project: PyMLT    Author: didw    | project source | file source
def __init__(self, s_date):
        prev_bd = int(s_date[:6])-1
        prev_ed = int(s_date[9:15])-1
        if prev_bd%100 == 0: prev_bd -= 98
        if prev_ed%100 == 0: prev_ed -= 98
        pred_s_date = "%d01_%d01" % (prev_bd, prev_ed)
        prev_model = '../model/tflearn/lstm/%s' % pred_s_date
        self.model_dir = '../model/tflearn/lstm/%s' % s_date

        tf.reset_default_graph()
        tflearn.init_graph(gpu_memory_fraction=0.1)
        input_layer = tflearn.input_data(shape=[None, 30, 23], name='input')
        lstm1 = tflearn.lstm(input_layer, 23, dynamic=True, name='lstm1')
        dense1 = tflearn.fully_connected(lstm1, 1, name='dense1')
        output = tflearn.single_unit(dense1)
        regression = tflearn.regression(output, optimizer='adam', loss='mean_square',
                                metric='R2', learning_rate=0.001)
        self.estimators = tflearn.DNN(regression)
        if os.path.exists('%s/model.tfl' % prev_model):
            self.estimators.load('%s/model.tfl' % prev_model)
Project: PyMLT    Author: didw    | project source | file source
def __init__(self):
        self.len_past = 30
        #self.s_date = "20120101_20160330"
        #self.model_dir = '../model/tflearn/reg_l3_bn/big/%s/' % self.s_date

        tf.reset_default_graph()
        tflearn.init_graph(gpu_memory_fraction=0.05)
        input_layer = tflearn.input_data(shape=[None, 690], name='input')
        dense1 = tflearn.fully_connected(input_layer, 400, name='dense1', activation='relu')
        dense1n = tflearn.batch_normalization(dense1, name='BN1')
        dense2 = tflearn.fully_connected(dense1n, 100, name='dense2', activation='relu')
        dense2n = tflearn.batch_normalization(dense2, name='BN2')
        dense3 = tflearn.fully_connected(dense2n, 1, name='dense3')
        output = tflearn.single_unit(dense3)
        regression = tflearn.regression(output, optimizer='adam', loss='mean_square',
                                metric='R2', learning_rate=0.001)
        self.estimators = tflearn.DNN(regression)
        self.qty = {}
        self.day_last = {}
        self.currency = 100000000
Project: npi    Author: siddk    | project source | file source
def key_net(self):
        """
        Build the NPI Key Network, which takes in the NPI Core Hidden State and returns
        similarity scores over possible next programs (the softmax is applied downstream).

        References: Reed, de Freitas [3, 4]
        """
        # Get Key from Key Network
        hidden = tflearn.fully_connected(self.h, self.key_dim, activation='elu', regularizer='L2')
        key = tflearn.fully_connected(hidden, self.key_dim)    # Shape: [bsz, key_dim]

        # Perform dot product operation, then softmax over all options to generate distribution
        key = tflearn.reshape(key, [-1, 1, self.key_dim])
        key = tf.tile(key, [1, self.num_progs, 1])             # Shape: [bsz, n_progs, key_dim]
        prog_sim = tf.multiply(key, self.core.program_key)     # Shape: [bsz, n_progs, key_dim] (tf.mul was renamed tf.multiply in TF 1.0)
        prog_dist = tf.reduce_sum(prog_sim, [2])               # Shape: [bsz, n_progs]
        return prog_dist
Project: npi    Author: siddk    | project source | file source
def build_encoder(self):
        """
        Build the Encoder Network (f_enc) taking the environment state (env_in) and the program
        arguments (arg_in), feeding through a Multilayer Perceptron, to generate the state encoding
        (s_t).

        Reed, de Freitas only specify that the f_enc is a Multilayer Perceptron => As such we use
        two ELU Layers, up-sampling to a state vector with dimension 128.

        Reference: Reed, de Freitas [9]
        """
        merge = tflearn.merge([self.env_in, self.arg_in], 'concat')
        elu = tflearn.fully_connected(merge, self.hidden_dim, activation='elu')
        elu = tflearn.fully_connected(elu, self.hidden_dim, activation='elu')
        out = tflearn.fully_connected(elu, self.state_dim)
        return out
Project: pygta5    Author: Sentdex    | project source | file source
def resnext(width, height, frame_count, lr, output=9, model_name = 'sentnet_color.model'):
    net = input_data(shape=[None, width, height, 3], name='input')
    net = tflearn.conv_2d(net, 16, 3, regularizer='L2', weight_decay=0.0001)
    n = 5  # residual blocks per stage; `n` is undefined in the original excerpt, value assumed here
    net = tflearn.layers.conv.resnext_block(net, n, 16, 32)
    net = tflearn.resnext_block(net, 1, 32, 32, downsample=True)
    net = tflearn.resnext_block(net, n-1, 32, 32)
    net = tflearn.resnext_block(net, 1, 64, 32, downsample=True)
    net = tflearn.resnext_block(net, n-1, 64, 32)
    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    net = tflearn.global_avg_pool(net)
    # Regression
    net = tflearn.fully_connected(net, output, activation='softmax')
    opt = tflearn.Momentum(0.1, lr_decay=0.1, decay_step=32000, staircase=True)
    net = tflearn.regression(net, optimizer=opt,
                             loss='categorical_crossentropy')

    model = tflearn.DNN(net,
                        max_checkpoints=0, tensorboard_verbose=0, tensorboard_dir='log')

    return model
Project: make-lstm-great-again    Author: eleurent    | project source | file source
def build_model(maxlen, char_idx, checkpoint_path):
    g = tflearn.input_data([None, maxlen, len(char_idx)])
    g = tflearn.lstm(g, 512, return_seq=True)
    g = tflearn.dropout(g, 0.5)
    g = tflearn.lstm(g, 512, return_seq=True)
    g = tflearn.dropout(g, 0.5)
    g = tflearn.lstm(g, 512)
    g = tflearn.dropout(g, 0.5)
    g = tflearn.fully_connected(g, len(char_idx), activation='softmax')
    g = tflearn.regression(g, optimizer='adam', loss='categorical_crossentropy',
                           learning_rate=0.001)

    return tflearn.SequenceGenerator(g, dictionary=char_idx,
                                     seq_maxlen=maxlen,
                                     clip_gradients=5.0,
                                     checkpoint_path=checkpoint_path)
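A hypothetical train-and-sample run for the generator above (input file and seed are placeholders; the seed must be exactly maxlen characters):

from tflearn.data_utils import textfile_to_semi_redundant_sequences

maxlen = 25
X, Y, char_idx = textfile_to_semi_redundant_sequences('input.txt', seq_maxlen=maxlen)
m = build_model(maxlen, char_idx, 'model_checkpoints')
m.fit(X, Y, validation_set=0.1, batch_size=128, n_epoch=1)
print(m.generate(600, temperature=0.5, seq_seed='The quick brown fox jumps'))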
Project: icnn    Author: locuslab    | project source | file source
def f_ficnn(self, x, y, reuse=False):
        fc = tflearn.fully_connected
        xy = tf.concat((x, y), 1)  # TF 1.x argument order; the original used the pre-1.0 tf.concat(1, (x, y))

        prevZ = None
        for i, sz in enumerate([200, 200, 1]):
            z_add = []

            with tf.variable_scope('z_x{}'.format(i)) as s:
                z_x = fc(xy, sz, reuse=reuse, scope=s, bias=True)
                z_add.append(z_x)

            if prevZ is not None:
                with tf.variable_scope('z_z{}_proj'.format(i)) as s:
                    z_z = fc(prevZ, sz, reuse=reuse, scope=s, bias=False)
                    z_add.append(z_z)

            z = tf.add_n(z_add)
            if sz != 1:  # no ReLU on the final scalar layer (the original left z unassigned here)
                z = tf.nn.relu(z)
            prevZ = z

        return tf.contrib.layers.flatten(z)
Project: identifiera-sarkasm    Author: risnejunior    | project source | file source
def big_boy(self, hyp, pd):
        restore = True
        net = tflearn.input_data([None, pd.max_sequence], dtype=tf.float32)
        net = tflearn.embedding(net, input_dim=pd.vocab_size,
                                     output_dim=pd.emb_size,
                                     name="embedding",
                                     restore=restore)

        net = tflearn.lstm(net,
                           512,
                           dropout=hyp.lstm.dropout,
                           weights_init='uniform_scaling',
                           dynamic=True,
                           name="lstm",
                           restore=restore)

        net = tflearn.fully_connected(net,
                                      128,
                                      activation='sigmoid',
                                      regularizer='L2',
                                      weight_decay=hyp.middle.weight_decay,
                                      weights_init='uniform_scaling',
                                      name="middle",
                                      restore=restore)

        net = tflearn.dropout(net, hyp.dropout.dropout, name="dropout")
        net = tflearn.fully_connected(net,
                                      2,
                                      activation='softmax',
                                      regularizer='L2',
                                      weight_decay=hyp.output.weight_decay,
                                      weights_init='uniform_scaling',
                                      name="output",
                                      restore=restore)
        net = tflearn.regression(net,
                                 optimizer='adam',
                                 learning_rate=hyp.regression.learning_rate,
                                 loss='categorical_crossentropy')
        return net
Project: DeepOSM    Author: trailbehind    | project source | file source
def model_for_type(neural_net_type, tile_size, on_band_count):
    """The neural_net_type can be: one_layer_relu,
                                   one_layer_relu_conv,
                                   two_layer_relu_conv."""
    network = tflearn.input_data(shape=[None, tile_size, tile_size, on_band_count])

    # NN architectures mirror ch. 3 of www.cs.toronto.edu/~vmnih/docs/Mnih_Volodymyr_PhD_Thesis.pdf
    if neural_net_type == 'one_layer_relu':
        network = tflearn.fully_connected(network, 64, activation='relu')
    elif neural_net_type == 'one_layer_relu_conv':
        network = conv_2d(network, 64, 12, strides=4, activation='relu')
        network = max_pool_2d(network, 3)
    elif neural_net_type == 'two_layer_relu_conv':
        network = conv_2d(network, 64, 12, strides=4, activation='relu')
        network = max_pool_2d(network, 3)
        network = conv_2d(network, 128, 4, activation='relu')
    else:
        print("ERROR: exiting, unknown layer type for neural net")

    # classify as road or not road
    softmax = tflearn.fully_connected(network, 2, activation='softmax')

    # hyperparameters based on www.cs.toronto.edu/~vmnih/docs/Mnih_Volodymyr_PhD_Thesis.pdf
    momentum = tflearn.optimizers.Momentum(
        learning_rate=.005, momentum=0.9,
        lr_decay=0.0002, name='Momentum')

    net = tflearn.regression(softmax, optimizer=momentum, loss='categorical_crossentropy')

    return tflearn.DNN(net, tensorboard_verbose=0)
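A hypothetical call; train_images is assumed to be a NumPy array shaped [n, tile_size, tile_size, on_band_count] with one-hot train_labels of shape [n, 2]:

model = model_for_type('one_layer_relu_conv', tile_size=64, on_band_count=3)
model.fit(train_images, train_labels, n_epoch=5, validation_set=0.1, show_metric=True)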
Project: skill-voice-recognition    Author: TREE-Edu    | project source | file source
def handle_speaker_rec_test_intent(self, message):
        speakers = data.get_speakers()
        number_classes=len(speakers)
        #print("speakers",speakers)

        #batch=data.wave_batch_generator(batch_size=1000, source=data.Source.DIGIT_WAVES, target=data.Target.speaker)
        #X,Y=next(batch)


        # Classification
        #tflearn.init_graph(num_cores=8, gpu_memory_fraction=0.5)

        net = tflearn.input_data(shape=[None, 8192]) #Two wave chunks
        net = tflearn.fully_connected(net, 64)
        net = tflearn.dropout(net, 0.5)
        net = tflearn.fully_connected(net, number_classes, activation='softmax')
        net = tflearn.regression(net, optimizer='adam', loss='categorical_crossentropy')

        model = tflearn.DNN(net)
        #model.fit(X, Y, n_epoch=100, show_metric=True, snapshot_step=100)

        CWD_PATH = os.path.dirname(__file__)
        path_to_model = os.path.join(CWD_PATH, 'model', 'model.tfl')
        model.load(path_to_model) 

        demo_file = "8_Vicki_260.wav"
        #demo_file = "8_Bruce_260.wav"
        demo=data.load_wav_file(data.path + demo_file)
        result=model.predict([demo])
        result=data.one_hot_to_item(result,speakers)
        if result == "Vicki":
            self.speak("I am confident I'm speaking to %s"%(result)) # ~ 97% correct
        else:
            self.speak("I'm sorry I don't recognize your voice")
Project: tflearn    Author: tflearn    | project source | file source
def deep_model(self, wide_inputs, n_inputs, n_nodes=[100, 50], use_dropout=False):
        '''
        Model - deep, i.e. two-layer fully connected network model
        '''
        cc_input_var = {}
        cc_embed_var = {}
        flat_vars = []
        if self.verbose:
            print ("--> deep model: %s categories, %d continuous" % (len(self.categorical_columns), n_inputs))
        for cc, cc_size in self.categorical_columns.items():
            cc_input_var[cc] = tflearn.input_data(shape=[None, 1], name="%s_in" % cc,  dtype=tf.int32)
            # embedding layers only work on CPU!  No GPU implementation in tensorflow, yet!
            cc_embed_var[cc] = tflearn.layers.embedding_ops.embedding(cc_input_var[cc],    cc_size,  8, name="deep_%s_embed" % cc)
            if self.verbose:
                print ("    %s_embed = %s" % (cc, cc_embed_var[cc]))
            flat_vars.append(tf.squeeze(cc_embed_var[cc], squeeze_dims=[1], name="%s_squeeze" % cc))

        network = tf.concat([wide_inputs] + flat_vars, 1, name="deep_concat")
        for k in range(len(n_nodes)):
            network = tflearn.fully_connected(network, n_nodes[k], activation="relu", name="deep_fc%d" % (k+1))
            if use_dropout:
                network = tflearn.dropout(network, 0.5, name="deep_dropout%d" % (k+1))
        if self.verbose:
            print ("Deep model network before output %s" % network)
        network = tflearn.fully_connected(network, 1, activation="linear", name="deep_fc_output", bias=False)
        network = tf.reshape(network, [-1, 1])  # so that accuracy is binary_accuracy
        if self.verbose:
            print ("Deep model network %s" % network)
        return network
Project: tflearn    Author: tflearn    | project source | file source
def wide_model(self, inputs, n_inputs):
        '''
        Model - wide, i.e. normal linear model (for logistic regression)
        '''
        network = inputs
        # use fully_connected (instead of single_unit) because fc works properly with batches, whereas single_unit is 1D only
        network = tflearn.fully_connected(network, n_inputs, activation="linear", name="wide_linear", bias=False)   # x*W (no bias)
        network = tf.reduce_sum(network, 1, name="reduce_sum")  # batched sum, to produce logits
        network = tf.reshape(network, [-1, 1])  # so that accuracy is binary_accuracy
        if self.verbose:
            print ("Wide model network %s" % network)
        return network
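In the wide-and-deep example these two builders feed a single head; a hedged sketch of that combination, assuming the "wide+deep" mode simply sums the two logit streams:

        # Sketch (assumption, not shown in the excerpts): combine branches
        wide_network = self.wide_model(wide_inputs, n_inputs)
        deep_network = self.deep_model(wide_inputs, n_inputs)
        network = tf.add(wide_network, deep_network)  # sum the [-1, 1]-shaped logits
        network = tf.nn.sigmoid(network)              # probability of the positive class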
Project: tflearn    Author: tflearn    | project source | file source
def make_core_network(network):
        network = tflearn.reshape(network, [-1, 28, 28, 1], name="reshape")
        network = conv_2d(network, 32, 3, activation='relu', regularizer="L2")
        network = max_pool_2d(network, 2)
        network = local_response_normalization(network)
        network = conv_2d(network, 64, 3, activation='relu', regularizer="L2")
        network = max_pool_2d(network, 2)
        network = local_response_normalization(network)
        network = fully_connected(network, 128, activation='tanh')
        network = dropout(network, 0.8)
        network = fully_connected(network, 256, activation='tanh')
        network = dropout(network, 0.8)
        network = fully_connected(network, 10, activation='softmax')
        return network
Project: tflearn    Author: tflearn    | project source | file source
def make_core_network(network):
        dense1 = tflearn.fully_connected(network, 64, activation='tanh',
                                         regularizer='L2', weight_decay=0.001, name="dense1")
        dropout1 = tflearn.dropout(dense1, 0.8)
        dense2 = tflearn.fully_connected(dropout1, 64, activation='tanh',
                                         regularizer='L2', weight_decay=0.001, name="dense2")
        dropout2 = tflearn.dropout(dense2, 0.8)
        softmax = tflearn.fully_connected(dropout2, 10, activation='softmax', name="softmax")
        return softmax
Project: tflearn    Author: tflearn    | project source | file source
def generator(x, reuse=False):
    with tf.variable_scope('Generator', reuse=reuse):
        x = tflearn.fully_connected(x, n_units=7 * 7 * 128)
        x = tflearn.batch_normalization(x)
        x = tf.nn.tanh(x)
        x = tf.reshape(x, shape=[-1, 7, 7, 128])
        x = tflearn.upsample_2d(x, 2)
        x = tflearn.conv_2d(x, 64, 5, activation='tanh')
        x = tflearn.upsample_2d(x, 2)
        x = tflearn.conv_2d(x, 1, 5, activation='sigmoid')
        return x


# Discriminator
Project: tflearn    Author: tflearn    | project source | file source
def discriminator(x, reuse=False):
    with tf.variable_scope('Discriminator', reuse=reuse):
        x = tflearn.conv_2d(x, 64, 5, activation='tanh')
        x = tflearn.avg_pool_2d(x, 2)
        x = tflearn.conv_2d(x, 128, 5, activation='tanh')
        x = tflearn.avg_pool_2d(x, 2)
        x = tflearn.fully_connected(x, 1024, activation='tanh')
        x = tflearn.fully_connected(x, 2)
        x = tf.nn.softmax(x)
        return x


# Input Data
Project: tflearn    Author: tflearn    | project source | file source
def generator(x, reuse=False):
    with tf.variable_scope('Generator', reuse=reuse):
        x = tflearn.fully_connected(x, 256, activation='relu')
        x = tflearn.fully_connected(x, image_dim, activation='sigmoid')
        return x


# Discriminator
Project: tflearn    Author: tflearn    | project source | file source
def discriminator(x, reuse=False):
    with tf.variable_scope('Discriminator', reuse=reuse):
        x = tflearn.fully_connected(x, 256, activation='relu')
        x = tflearn.fully_connected(x, 1, activation='sigmoid')
        return x

# Build Networks
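At this point tflearn's GAN example wires the two pieces together; a minimal sketch of that wiring, assuming z_dim and image_dim values (200 and 784 for MNIST):

import tensorflow as tf
import tflearn

z_dim, image_dim = 200, 784
gen_input = tflearn.input_data(shape=[None, z_dim], name='input_noise')
disc_input = tflearn.input_data(shape=[None, image_dim], name='disc_input')

gen_sample = generator(gen_input)
disc_real = discriminator(disc_input)
disc_fake = discriminator(gen_sample, reuse=True)  # reuse Discriminator weights

# Non-saturating GAN losses over the sigmoid outputs
disc_loss = -tf.reduce_mean(tf.log(disc_real) + tf.log(1. - disc_fake))
gen_loss = -tf.reduce_mean(tf.log(disc_fake))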
Project: tflearn    Author: tflearn    | project source | file source
def vgg16(input, num_class):

    x = tflearn.conv_2d(input, 64, 3, activation='relu', scope='conv1_1')
    x = tflearn.conv_2d(x, 64, 3, activation='relu', scope='conv1_2')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool1')

    x = tflearn.conv_2d(x, 128, 3, activation='relu', scope='conv2_1')
    x = tflearn.conv_2d(x, 128, 3, activation='relu', scope='conv2_2')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool2')

    x = tflearn.conv_2d(x, 256, 3, activation='relu', scope='conv3_1')
    x = tflearn.conv_2d(x, 256, 3, activation='relu', scope='conv3_2')
    x = tflearn.conv_2d(x, 256, 3, activation='relu', scope='conv3_3')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool3')

    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv4_1')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv4_2')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv4_3')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool4')

    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_1')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_2')
    x = tflearn.conv_2d(x, 512, 3, activation='relu', scope='conv5_3')
    x = tflearn.max_pool_2d(x, 2, strides=2, name='maxpool5')

    x = tflearn.fully_connected(x, 4096, activation='relu', scope='fc6')
    x = tflearn.dropout(x, 0.5, name='dropout1')

    x = tflearn.fully_connected(x, 4096, activation='relu', scope='fc7')
    x = tflearn.dropout(x, 0.5, name='dropout2')

    x = tflearn.fully_connected(x, num_class, activation='softmax', scope='fc8',
                                restore=False)

    return x
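A fine-tuning sketch following tflearn's VGG example: because fc8 above is created with restore=False, loading a checkpoint restores every layer except the new classifier head (the weights file name is a placeholder):

import tflearn

x = tflearn.input_data(shape=[None, 224, 224, 3])
softmax = vgg16(x, num_class=10)
net = tflearn.regression(softmax, optimizer='adam', learning_rate=0.0001,
                         loss='categorical_crossentropy')
model = tflearn.DNN(net)
model.load('vgg16.tflearn', weights_only=True)
# then fit on the new dataset as usual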
Project: tflearn    Author: tflearn    | project source | file source
def test_conv_layers(self):

        X = [[0., 0., 0., 0.], [1., 1., 1., 1.], [0., 0., 1., 0.], [1., 1., 1., 0.]]
        Y = [[1., 0.], [0., 1.], [1., 0.], [0., 1.]]

        with tf.Graph().as_default():
            g = tflearn.input_data(shape=[None, 4])
            g = tflearn.reshape(g, new_shape=[-1, 2, 2, 1])
            g = tflearn.conv_2d(g, 4, 2, activation='relu')
            g = tflearn.max_pool_2d(g, 2)
            g = tflearn.fully_connected(g, 2, activation='softmax')
            g = tflearn.regression(g, optimizer='sgd', learning_rate=1.)

            m = tflearn.DNN(g)
            m.fit(X, Y, n_epoch=100, snapshot_epoch=False)
            # TODO: Fix test
            #self.assertGreater(m.predict([[1., 0., 0., 0.]])[0][0], 0.5)

        # Bulk Tests
        with tf.Graph().as_default():
            g = tflearn.input_data(shape=[None, 4])
            g = tflearn.reshape(g, new_shape=[-1, 2, 2, 1])
            g = tflearn.conv_2d(g, 4, 2)
            g = tflearn.conv_2d(g, 4, 1)
            g = tflearn.conv_2d_transpose(g, 4, 2, [2, 2])
            g = tflearn.max_pool_2d(g, 2)
Project: tflearn    Author: tflearn    | project source | file source
def test_recurrent_layers(self):

        X = [[1, 3, 5, 7], [2, 4, 8, 10], [1, 5, 9, 11], [2, 6, 8, 0]]
        Y = [[0., 1.], [1., 0.], [0., 1.], [1., 0.]]

        with tf.Graph().as_default():
            g = tflearn.input_data(shape=[None, 4])
            g = tflearn.embedding(g, input_dim=12, output_dim=4)
            g = tflearn.lstm(g, 6)
            g = tflearn.fully_connected(g, 2, activation='softmax')
            g = tflearn.regression(g, optimizer='sgd', learning_rate=1.)

            m = tflearn.DNN(g)
            m.fit(X, Y, n_epoch=300, snapshot_epoch=False)
            self.assertGreater(m.predict([[5, 9, 11, 1]])[0][1], 0.9)
Project: tflearn    Author: tflearn    | project source | file source
def test_regression_placeholder(self):
        '''
        Check that regression does not duplicate placeholders
        '''

        with tf.Graph().as_default():

            g = tflearn.input_data(shape=[None, 2])
            g_nand = tflearn.fully_connected(g, 1, activation='linear')
            with tf.name_scope("Y"):
                Y_in = tf.placeholder(shape=[None, 1], dtype=tf.float32, name="Y")
            tflearn.regression(g_nand, optimizer='sgd',
                               placeholder=Y_in,
                               learning_rate=2.,
                               loss='binary_crossentropy', 
                               op_name="regression1",
                               name="Y")
            # for this test, just use the same default trainable_vars
            # in practice, this should be different for the two regressions
            tflearn.regression(g_nand, optimizer='adam',
                               placeholder=Y_in,
                               learning_rate=2.,
                               loss='binary_crossentropy', 
                               op_name="regression2",
                               name="Y")

            self.assertEqual(len(tf.get_collection(tf.GraphKeys.TARGETS)), 1)
Project: icyface_api    Author: bupticybee    | project source | file source
def recognize(picbase64):
    global sess,fully_connected,input_layer
    image = Image.open(cStringIO.StringIO(base64.b64decode(picbase64)))
    imagearr = np.asarray(image)
    imagearr = exame_image(imagearr)
    box = cbox.get_facebox(imagearr)
    segmented = em.segment(imagearr,box,None)
    segmented = segmented[0][1]
    img = imresize(segmented, (224, 224)).reshape((1, 224, 224, 3))
    vector = sess.run(fully_connected, feed_dict={
        input_layer: np.asarray(img, dtype=np.float) / 255})[0]
    return box,vector
Project: easygen    Author: markriedl    | project source | file source
def CharacterLSTM_Run(seed, dictionary, model, output, steps = 600, layers = 3, hidden_nodes = 512, history = 25, temperature = 0.5, dropout = False):
    char_idx_file = dictionary
    maxlen = history

    char_idx = None
    if os.path.isfile(char_idx_file):
        print('Loading previous char_idx')
        char_idx = pickle.load(open(char_idx_file, 'rb'))

    tf.reset_default_graph()
    g = buildModel(layers, hidden_nodes, maxlen, char_idx, dropout)
    '''
    g = tflearn.input_data([None, maxlen, len(char_idx)])
    for n in range(layers-1):
        g = tflearn.lstm(g, hidden_nodes, return_seq=True)
        if dropout:
            g = tflearn.dropout(g, 0.5)
    g = tflearn.lstm(g, hidden_nodes)
    if dropout:
        g = tflearn.dropout(g, 0.5)
    g = tflearn.fully_connected(g, len(char_idx), activation='softmax')
    g = tflearn.regression(g, optimizer='adam', loss='categorical_crossentropy', learning_rate=0.001)
    '''
    m = tflearn.SequenceGenerator(g, dictionary=char_idx, seq_maxlen=maxlen, clip_gradients=5.0) #, checkpoint_path='model_history_gen')

    m.load(model)

    #seed = random_sequence_from_textfile(data, maxlen)

    print('seed='+seed)
    print('len=' + str(len(seed)))
    result = m.generate(steps, temperature=temperature, seq_seed=seed[:history])
    print (result)
    return result
Project: continuous-online-video-classification-blog    Author: harvitronix    | project source | file source
def get_network(frames, input_size, num_classes):
    """Create our LSTM"""
    net = tflearn.input_data(shape=[None, frames, input_size])
    net = tflearn.lstm(net, 128, dropout=0.8, return_seq=True)
    net = tflearn.lstm(net, 128)
    net = tflearn.fully_connected(net, num_classes, activation='softmax')
    net = tflearn.regression(net, optimizer='adam',
                             loss='categorical_crossentropy', name="output1")
    return net
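A hypothetical training call for the network above; X is assumed to hold CNN-feature sequences shaped [n, frames, input_size] with one-hot labels y:

import tflearn

net = get_network(frames=40, input_size=2048, num_classes=2)
model = tflearn.DNN(net, tensorboard_verbose=0)
model.fit(X, y, validation_set=0.1, n_epoch=10, show_metric=True)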
Project: continuous-online-video-classification-blog    Author: harvitronix    | project source | file source
def get_network_deep(frames, input_size, num_classes):
    """Create a deeper LSTM"""
    net = tflearn.input_data(shape=[None, frames, input_size])
    net = tflearn.lstm(net, 64, dropout=0.2, return_seq=True)
    net = tflearn.lstm(net, 64, dropout=0.2, return_seq=True)
    net = tflearn.lstm(net, 64, dropout=0.2)
    net = tflearn.fully_connected(net, num_classes, activation='softmax')
    net = tflearn.regression(net, optimizer='adam',
                             loss='categorical_crossentropy', name="output1")
    return net
Project: continuous-online-video-classification-blog    Author: harvitronix    | project source | file source
def get_network_wide(frames, input_size, num_classes):
    """Create a wider LSTM"""
    net = tflearn.input_data(shape=[None, frames, input_size])
    net = tflearn.lstm(net, 256, dropout=0.2)
    net = tflearn.fully_connected(net, num_classes, activation='softmax')
    net = tflearn.regression(net, optimizer='adam',
                             loss='categorical_crossentropy', name='output1')
    return net
Project: continuous-online-video-classification-blog    Author: harvitronix    | project source | file source
def get_network_wider(frames, input_size, num_classes):
    """Create a wider LSTM"""
    net = tflearn.input_data(shape=[None, frames, input_size])
    net = tflearn.lstm(net, 512, dropout=0.2)
    net = tflearn.fully_connected(net, num_classes, activation='softmax')
    net = tflearn.regression(net, optimizer='adam',
                             loss='categorical_crossentropy', name='output1')
    return net
Project: deep_portfolio    Author: deependersingla    | project source | file source
def create_actor_network(self):
        inputs = tflearn.input_data(shape=[None, self.s_dim])
        net = tflearn.fully_connected(inputs, 400, activation='relu')
        net = tflearn.fully_connected(net, 300, activation='relu')
        # Final layer weights are init to Uniform[-3e-3, 3e-3]
        w_init = tflearn.initializations.uniform(minval=-0.003, maxval=0.003)
        out = tflearn.fully_connected(net, self.a_dim, activation='tanh', weights_init=w_init)
        scaled_out = tf.multiply(out, self.action_bound)  # Scale output to -action_bound to action_bound
        return inputs, out, scaled_out
Project: deep_portfolio    Author: deependersingla    | project source | file source
def create_actor_network(self):
        inputs = tflearn.input_data(shape=[None, self.s_dim])
        net = tflearn.fully_connected(inputs, 400)
        net = tflearn.layers.normalization.batch_normalization(net)
        net = tflearn.activations.relu(net)
        net = tflearn.fully_connected(net, 300)
        net = tflearn.layers.normalization.batch_normalization(net)
        net = tflearn.activations.relu(net)
        # Final layer weights are init to Uniform[-3e-3, 3e-3]
        w_init = tflearn.initializations.uniform(minval=-0.003, maxval=0.003)
        out = tflearn.fully_connected(
            net, self.a_dim, activation='tanh', weights_init=w_init)
        # Scale output to -action_bound to action_bound
        scaled_out = tf.multiply(out, self.action_bound)
        return inputs, out, scaled_out
Project: PyMLT    Author: didw    | project source | file source
def __init__(self, s_date, n_frame):
        self.n_epoch = 20
        prev_bd = int(s_date[:6])-1
        prev_ed = int(s_date[9:15])-1
        if prev_bd%100 == 0: prev_bd -= 98
        if prev_ed%100 == 0: prev_ed -= 98
        pred_s_date = "%d01_%d01" % (prev_bd, prev_ed)
        prev_model = '../model/tflearn/reg_l3_bn/big/%s' % pred_s_date
        self.model_dir = '../model/tflearn/reg_l3_bn/big/%s' % s_date

        tf.reset_default_graph()
        tflearn.init_graph(gpu_memory_fraction=0.1)
        input_layer = tflearn.input_data(shape=[None, 23*n_frame], name='input')
        dense1 = tflearn.fully_connected(input_layer, 400, name='dense1', activation='relu')
        dense1n = tflearn.batch_normalization(dense1, name='BN1')
        dense2 = tflearn.fully_connected(dense1n, 100, name='dense2', activation='relu')
        dense2n = tflearn.batch_normalization(dense2, name='BN2')
        dense3 = tflearn.fully_connected(dense2n, 1, name='dense3')
        output = tflearn.single_unit(dense3)
        regression = tflearn.regression(output, optimizer='adam', loss='mean_square',
                                metric='R2', learning_rate=0.001)
        self.estimators = tflearn.DNN(regression)
        if os.path.exists('%s/model.tfl' % prev_model):
            self.estimators.load('%s/model.tfl' % prev_model)
            self.n_epoch = 10
        if not os.path.exists(self.model_dir):
            os.makedirs(self.model_dir)
Project: npi    Author: siddk    | project source | file source
def terminate_net(self):
        """
        Build the NPI Termination Network, which takes in the NPI Core Hidden State and returns
        a two-way score (logits) over whether to terminate the program.

        References: Reed, de Freitas [3]
        """
        p_terminate = tflearn.fully_connected(self.h, 2, activation='linear', regularizer='L2')
        return p_terminate                                      # Shape: [bsz, 2]
Project: npi    Author: siddk    | project source | file source
def argument_net(self):
        """
        Build the NPI Argument Networks (a separate net for each argument), each of which takes in
        the NPI Core Hidden State, and returns a softmax over the argument dimension.

        References: Reed, de Freitas [3]
        """
        args = []
        for i in range(self.num_args):
            arg = tflearn.fully_connected(self.h, self.arg_depth, activation='linear',
                                          regularizer='L2', name='Argument_{}'.format(str(i)))
            args.append(arg)
        return args                                             # Shape: [bsz, arg_depth]
Project: rnn-sentiment-analysis    Author: kashizui    | project source | file source
def build(embedding_size=(400000, 50), train_embedding=False, hidden_dims=128,
          learning_rate=0.001):
    net = tflearn.input_data([None, 200])
    net = tflearn.embedding(net, input_dim=embedding_size[0],
                            output_dim=embedding_size[1],
                            trainable=train_embedding, name='EmbeddingLayer')
    net = tflearn.lstm(net, hidden_dims)
    net = tflearn.dropout(net, 0.5)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='adam', learning_rate=learning_rate,
                             loss='categorical_crossentropy')
    return net
Project: rnn-sentiment-analysis    Author: kashizui    | project source | file source
def generate_net(embedding):
    net = tflearn.input_data([None, 200])
    net = tflearn.embedding(net, input_dim=300000, output_dim=128)
    net = tflearn.lstm(net, 128)
    net = tflearn.dropout(net, 0.5)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='adam',
                             loss='categorical_crossentropy')
    return net