Python tflearn module: DNN example source code

We extracted the following 50 code examples from open-source Python projects to illustrate how to use tflearn.DNN.

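Before the project-specific examples, here is a minimal, self-contained sketch of the common tflearn.DNN workflow: assemble a graph with input_data / fully_connected / regression, wrap it in tflearn.DNN, then call fit, predict, save, and load. The toy XOR-style data, the layer sizes, and the checkpoint file name are illustrative assumptions and do not come from any of the projects below.

import tflearn

# Toy data (illustrative): two-bit inputs with one-hot XOR labels.
X = [[0., 0.], [0., 1.], [1., 0.], [1., 1.]]
Y = [[1., 0.], [0., 1.], [0., 1.], [1., 0.]]

# Build the graph: input -> hidden layer -> softmax output -> training op.
net = tflearn.input_data(shape=[None, 2])
net = tflearn.fully_connected(net, 8, activation='relu')
net = tflearn.fully_connected(net, 2, activation='softmax')
net = tflearn.regression(net, optimizer='adam', learning_rate=0.01,
                         loss='categorical_crossentropy')

# Wrap the graph in a DNN, then train, predict, save and reload.
model = tflearn.DNN(net, tensorboard_verbose=0)
model.fit(X, Y, n_epoch=200, show_metric=True, snapshot_epoch=False)

print(model.predict([[1., 0.]]))  # class probabilities for a single sample
model.save('xor_dnn.tflearn')     # illustrative checkpoint name
model.load('xor_dnn.tflearn')

Every example below follows this same pattern: build a tflearn graph, pass it to tflearn.DNN, and then call fit, predict, evaluate, save, or load on the returned model.
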
Project: Convolutional-Autoencoder    Author: OliverEdholm
def main():
    model = train_autoencoder.build_model()
    model = tflearn.DNN(model)

    logging.info('loading checkpoint')
    checkpoint_path = sys.argv[1]
    model.load(checkpoint_path)

    img_path = sys.argv[2]
    img_arr = get_img(img_path)

    logging.info('getting output')
    pred = model.predict(img_arr)

    logging.debug('saving output to output.jpg')
    pred = pred[0]
    pred_img = image.array_to_img(pred)
    pred_img.save('output.jpg')
Project: Emotion-Recognition    Author: Shujathlive
def build_network(self):
    # Smaller 'AlexNet'
    # https://github.com/tflearn/tflearn/blob/master/examples/images/alexnet.py
    print('[+] Building CNN')
    self.network = input_data(shape = [None, SIZE_FACE, SIZE_FACE, 1])
    self.network = conv_2d(self.network, 64, 5, activation = 'relu')
    #self.network = local_response_normalization(self.network)
    self.network = max_pool_2d(self.network, 3, strides = 2)
    self.network = conv_2d(self.network, 64, 5, activation = 'relu')
    self.network = max_pool_2d(self.network, 3, strides = 2)
    self.network = conv_2d(self.network, 128, 4, activation = 'relu')
    self.network = dropout(self.network, 0.3)
    self.network = fully_connected(self.network, 3072, activation = 'relu')
    self.network = fully_connected(self.network, len(EMOTIONS), activation = 'softmax')
    self.network = regression(self.network,
      optimizer = 'momentum',
      loss = 'categorical_crossentropy')
    self.model = tflearn.DNN(
      self.network,
      checkpoint_path = SAVE_DIRECTORY + '/emotion_recognition',
      max_checkpoints = 1,
      tensorboard_verbose = 2
    )
    self.load_model()
Project: google_ml_challenge    Author: SSUHan
def neural_network_model(input_size):
    net = input_data(shape=[None, input_size, 1], name='input')

    net = fully_connected(net, 128, activation='relu')
    net = dropout(net, 0.8)

    net = fully_connected(net, 256, activation='relu')
    net = dropout(net, 0.8)

    net = fully_connected(net, 512, activation='relu')
    net = dropout(net, 0.8)

    net = fully_connected(net, 256, activation='relu')
    net = dropout(net, 0.8)

    net = fully_connected(net, 128, activation='relu')
    net = dropout(net, 0.8)

    net = fully_connected(net, 2, activation='softmax')
    net = regression(net, optimizer='adam', learning_rate=LR, loss='categorical_crossentropy', name='targets')

    model = tflearn.DNN(net, tensorboard_dir='log')

    return model
Project: pygta5    Author: Sentdex
def sentnet_LSTM_gray(width, height, frame_count, lr, output=9):
    network = input_data(shape=[None, width, height], name='input')
    #network = tflearn.input_data(shape=[None, 28, 28], name='input')
    network = tflearn.lstm(network, 128, return_seq=True)
    network = tflearn.lstm(network, 128)
    network = tflearn.fully_connected(network, 9, activation='softmax')
    network = tflearn.regression(network, optimizer='adam',
    loss='categorical_crossentropy', name="output1")

    model = tflearn.DNN(network, checkpoint_path='model_lstm',
                        max_checkpoints=1, tensorboard_verbose=0, tensorboard_dir='log')

    return model
Project: tflearn    Author: tflearn
def __init__(self):
        inputs = tflearn.input_data(shape=[None, 784], name="input")

        with tf.variable_scope("scope1") as scope:
            net_conv = Model1.make_core_network(inputs) # shape (?, 10)
        with tf.variable_scope("scope2") as scope:
            net_dnn = Model2.make_core_network(inputs)  # shape (?, 10)

        network = tf.concat([net_conv, net_dnn], 1, name="concat")  # shape (?, 20)
        network = tflearn.fully_connected(network, 10, activation="softmax")
        network = regression(network, optimizer='adam', learning_rate=0.01,
                             loss='categorical_crossentropy', name='target')

        self.model = tflearn.DNN(network, tensorboard_verbose=0)
Project: tflearn    Author: tflearn
def test_feed_dict_no_None(self):

        X = [[0., 0., 0., 0.], [1., 1., 1., 1.], [0., 0., 1., 0.], [1., 1., 1., 0.]]
        Y = [[1., 0.], [0., 1.], [1., 0.], [0., 1.]]

        with tf.Graph().as_default():
            g = tflearn.input_data(shape=[None, 4], name="X_in")
            g = tflearn.reshape(g, new_shape=[-1, 2, 2, 1])
            g = tflearn.conv_2d(g, 4, 2)
            g = tflearn.conv_2d(g, 4, 1)
            g = tflearn.max_pool_2d(g, 2)
            g = tflearn.fully_connected(g, 2, activation='softmax')
            g = tflearn.regression(g, optimizer='sgd', learning_rate=1.)

            m = tflearn.DNN(g)

            def do_fit():
                m.fit({"X_in": X, 'non_existent': X}, Y, n_epoch=30, snapshot_epoch=False)
            self.assertRaisesRegexp(Exception, "Feed dict asks for variable named 'non_existent' but no such variable is known to exist", do_fit)
Project: tflearn    Author: tflearn
def build_simple_model(self):
        """Build a simple model for test
        Returns:
            DNN, [ (input layer name, input placeholder, input data) ], Target data
        """
        inputPlaceholder1, inputPlaceholder2 = \
            tf.placeholder(tf.float32, (1, 1), name = "input1"), tf.placeholder(tf.float32, (1, 1), name = "input2")
        input1 = tflearn.input_data(placeholder = inputPlaceholder1)
        input2 = tflearn.input_data(placeholder = inputPlaceholder2)
        network = tflearn.merge([ input1, input2 ], "sum")
        network = tflearn.reshape(network, (1, 1))
        network = tflearn.fully_connected(network, 1)
        network = tflearn.regression(network)
        return (
            tflearn.DNN(network),
            [ ("input1:0", inputPlaceholder1, self.INPUT_DATA_1), ("input2:0", inputPlaceholder2, self.INPUT_DATA_2) ],
            self.TARGET,
        )
Project: continuous-online-video-classification-blog    Author: harvitronix
def main(filename, frames, batch_size, num_classes, input_length):
    """From the blog post linked above."""
    # Get our data.
    X_train, X_test, y_train, y_test = get_data(filename, frames, num_classes, input_length)

    # Get sizes.
    num_classes = len(y_train[0])

    # Get our network.
    net = get_network_wide(frames, input_length, num_classes)

    # Train the model.
    model = tflearn.DNN(net, tensorboard_verbose=0)
    model.fit(X_train, y_train, validation_set=(X_test, y_test),
              show_metric=True, batch_size=batch_size, snapshot_step=100,
              n_epoch=4)

    # Save it.
    model.save('checkpoints/rnn.tflearn')
Project: continuous-online-video-classification-blog    Author: harvitronix
def main(filename, frames, batch_size, num_classes, input_length):
    """From the blog post linked above."""
    # Get our data.
    X_train, _, y_train, _ = get_data(filename, frames, num_classes, input_length)

    # Get sizes.
    num_classes = len(y_train[0])

    # Get our network.
    net = get_network_wide(frames, input_length, num_classes)

    # Get our model.
    model = tflearn.DNN(net, tensorboard_verbose=0)
    model.load('checkpoints/rnn.tflearn')

    # Evaluate.
    print(model.evaluate(X_train, y_train))
Project: deep_portfolio    Author: deependersingla
def train_network(net, epochs, train, valid, asset):
    """
    Run training for epochs iterations
    train: tuple of (data, target)
    valid: tuple of (data, target)
    """
    # declare model
    model = tfl.DNN(net, tensorboard_dir="./logs_tb", tensorboard_verbose=3)
    # Train model
    model.fit({'input': train[0]}, {'target': train[1]}, n_epoch=epochs,
              validation_set=({'input': valid[0]}, {'target': valid[1]}),
              show_metric=True, shuffle=False)
    directory = "networks/" + asset
    if not os.path.exists(directory):
        os.makedirs(directory)
    model.save(directory + "/lstm3.tflearn")

    return model
Project: DeepAudioClassification    Author: despoisj
def createModel(nbClasses,imageSize):
    print("[+] Creating model...")
    convnet = input_data(shape=[None, imageSize, imageSize, 1], name='input')

    convnet = conv_2d(convnet, 64, 2, activation='elu', weights_init="Xavier")
    convnet = max_pool_2d(convnet, 2)

    convnet = conv_2d(convnet, 128, 2, activation='elu', weights_init="Xavier")
    convnet = max_pool_2d(convnet, 2)

    convnet = conv_2d(convnet, 256, 2, activation='elu', weights_init="Xavier")
    convnet = max_pool_2d(convnet, 2)

    convnet = conv_2d(convnet, 512, 2, activation='elu', weights_init="Xavier")
    convnet = max_pool_2d(convnet, 2)

    convnet = fully_connected(convnet, 1024, activation='elu')
    convnet = dropout(convnet, 0.5)

    convnet = fully_connected(convnet, nbClasses, activation='softmax')
    convnet = regression(convnet, optimizer='rmsprop', loss='categorical_crossentropy')

    model = tflearn.DNN(convnet)
    print("    Model created! ?")
    return model
Project: PyMLT    Author: didw
def __init__(self, s_date):
        prev_bd = int(s_date[:6])-1
        prev_ed = int(s_date[9:15])-1
        if prev_bd%100 == 0: prev_bd -= 98
        if prev_ed%100 == 0: prev_ed -= 98
        pred_s_date = "%d01_%d01" % (prev_bd, prev_ed)
        prev_model = '../model/tflearn/lstm/%s' % pred_s_date
        self.model_dir = '../model/tflearn/lstm/%s' % s_date

        tf.reset_default_graph()
        tflearn.init_graph(gpu_memory_fraction=0.1)
        input_layer = tflearn.input_data(shape=[None, 30, 23], name='input')
        lstm1 = tflearn.lstm(input_layer, 23, dynamic=True, name='lstm1')
        dense1 = tflearn.fully_connected(lstm1, 1, name='dense1')
        output = tflearn.single_unit(dense1)
        regression = tflearn.regression(output, optimizer='adam', loss='mean_square',
                                metric='R2', learning_rate=0.001)
        self.estimators = tflearn.DNN(regression)
        if os.path.exists('%s/model.tfl' % prev_model):
            self.estimators.load('%s/model.tfl' % prev_model)
Project: PyMLT    Author: didw
def __init__(self):
        self.len_past = 30
        #self.s_date = "20120101_20160330"
        #self.model_dir = '../model/tflearn/reg_l3_bn/big/%s/' % self.s_date

        tf.reset_default_graph()
        tflearn.init_graph(gpu_memory_fraction=0.05)
        input_layer = tflearn.input_data(shape=[None, 690], name='input')
        dense1 = tflearn.fully_connected(input_layer, 400, name='dense1', activation='relu')
        dense1n = tflearn.batch_normalization(dense1, name='BN1')
        dense2 = tflearn.fully_connected(dense1n, 100, name='dense2', activation='relu')
        dense2n = tflearn.batch_normalization(dense2, name='BN2')
        dense3 = tflearn.fully_connected(dense2n, 1, name='dense3')
        output = tflearn.single_unit(dense3)
        regression = tflearn.regression(output, optimizer='adam', loss='mean_square',
                                metric='R2', learning_rate=0.001)
        self.estimators = tflearn.DNN(regression)
        self.qty = {}
        self.day_last = {}
        self.currency = 100000000
Project: rcnn-with-tflearn    Author: Redoblue
def train_svms():
    if not os.path.isfile('models/fine_tune.model.index'):
        print('models/fine_tune.model doesn\'t exist.')
        return

    net = create_alexnet()
    model = tflearn.DNN(net)
    model.load('models/fine_tune.model')

    train_file_dir = 'svm_train/'
    flist = os.listdir(train_file_dir)
    svms = []
    for train_file in flist:
        if "pkl" in train_file:
            continue
        X, Y = generate_single_svm_train_data(train_file_dir + train_file)
        train_features = []
        for i in X:
            feats = model.predict([i])
            train_features.append(feats[0])
        print("feature dimension of fitting: {}".format(np.shape(train_features)))
        clf = svm.LinearSVC()
        clf.fit(train_features, Y)
        svms.append(clf)
    joblib.dump(svms, 'models/train_svm.model')
Project: rcnn-with-tflearn    Author: Redoblue
def pre_train():
    if os.path.isfile('models/pre_train.model.index'):
        print("Previous trained model exist.")
        return

    X, Y = oxflower17.load_data(one_hot=True, resize_pics=(227, 227))
    net = create_alexnet(17)
    model = tflearn.DNN(net, checkpoint_path='ckps/pre_train.ckp',
                        max_checkpoints=1, tensorboard_verbose=2, tensorboard_dir='tmp/pre_train_logs/')
    if os.path.isfile('models/pre_train.model'):
        model.load('models/pre_train.model')
    model.fit(X, Y, n_epoch=100, validation_set=0.1, shuffle=True,
              show_metric=True, batch_size=64, snapshot_step=200,
              snapshot_epoch=False, run_id='pre_train')
    # Save the model
    model.save('models/pre_train.model')
Project: rnn-sentiment-analysis    Author: kashizui
def train(model_name):  # model_name: assumed identifier used for logging and the saved checkpoint path
    embedding = generate_embedding()
    data = utils.load_sst('sst_data.pkl')
    net = generate_net(embedding)

    print("Loading model definition for %s..." % model_name)
    model = tflearn.DNN(net, clip_gradients=0., tensorboard_verbose=0)

    print("Training...")
    model.fit(data.trainX, data.trainY,
              validation_set=(data.valX, data.valY),
              show_metric=True, batch_size=128)

    print("Saving Model...")
    model_path = '%s.tflearn' % model_name
    model.save(model_path)
    print("Saved model to %s" % model_path)
Project: suiron    Author: kendricktan
def get_nn_model(checkpoint_path='nn_motor_model', session=None):
    # Input is a single value (raw motor value)
    network = input_data(shape=[None, 1], name='input')

    # Hidden layer no.1,  
    network = fully_connected(network, 12, activation='linear')

    # Output layer
    network = fully_connected(network, 1, activation='tanh')

    # regression
    network = regression(network, loss='mean_square', metric='accuracy', name='target')

    # Verbosity yay nay
    model = tflearn.DNN(network, tensorboard_verbose=3, checkpoint_path=checkpoint_path, session=session)
    return model
Project: pygta5    Author: Sentdex
def resnext(width, height, frame_count, lr, output=9, model_name = 'sentnet_color.model'):
    net = input_data(shape=[None, width, height, 3], name='input')
    net = tflearn.conv_2d(net, 16, 3, regularizer='L2', weight_decay=0.0001)
    n = 5  # blocks per stage; assumed value (`n` was a module-level constant in the original script)
    net = tflearn.layers.conv.resnext_block(net, n, 16, 32)
    net = tflearn.resnext_block(net, 1, 32, 32, downsample=True)
    net = tflearn.resnext_block(net, n-1, 32, 32)
    net = tflearn.resnext_block(net, 1, 64, 32, downsample=True)
    net = tflearn.resnext_block(net, n-1, 64, 32)
    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    net = tflearn.global_avg_pool(net)
    # Regression
    net = tflearn.fully_connected(net, output, activation='softmax')
    opt = tflearn.Momentum(0.1, lr_decay=0.1, decay_step=32000, staircase=True)
    net = tflearn.regression(net, optimizer=opt,
                             loss='categorical_crossentropy')

    model = tflearn.DNN(net,
                        max_checkpoints=0, tensorboard_verbose=0, tensorboard_dir='log')

    return model
Project: pygta5    Author: Sentdex
def resnext(width, height, frame_count, lr, output=9, model_name = 'sentnet_color.model'):
    net = input_data(shape=[None, width, height, 3], name='input')
    net = tflearn.conv_2d(net, 16, 3, regularizer='L2', weight_decay=0.0001)
    n = 5  # blocks per stage; assumed value (`n` was a module-level constant in the original script)
    net = tflearn.layers.conv.resnext_block(net, n, 16, 32)
    net = tflearn.resnext_block(net, 1, 32, 32, downsample=True)
    net = tflearn.resnext_block(net, n-1, 32, 32)
    net = tflearn.resnext_block(net, 1, 64, 32, downsample=True)
    net = tflearn.resnext_block(net, n-1, 64, 32)
    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    net = tflearn.global_avg_pool(net)
    # Regression
    net = tflearn.fully_connected(net, output, activation='softmax')
    opt = tflearn.Momentum(0.1, lr_decay=0.1, decay_step=32000, staircase=True)
    net = tflearn.regression(net, optimizer=opt,
                             loss='categorical_crossentropy')

    model = tflearn.DNN(net,
                        max_checkpoints=0, tensorboard_verbose=0, tensorboard_dir='log')

    return model
Project: pygta5    Author: Sentdex
def resnext(width, height, frame_count, lr, output=9, model_name = 'sentnet_color.model'):
    net = input_data(shape=[None, width, height, 3], name='input')
    net = tflearn.conv_2d(net, 16, 3, regularizer='L2', weight_decay=0.0001)
    n = 5  # blocks per stage; assumed value (`n` was a module-level constant in the original script)
    net = tflearn.layers.conv.resnext_block(net, n, 16, 32)
    net = tflearn.resnext_block(net, 1, 32, 32, downsample=True)
    net = tflearn.resnext_block(net, n-1, 32, 32)
    net = tflearn.resnext_block(net, 1, 64, 32, downsample=True)
    net = tflearn.resnext_block(net, n-1, 64, 32)
    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    net = tflearn.global_avg_pool(net)
    # Regression
    net = tflearn.fully_connected(net, output, activation='softmax')
    opt = tflearn.Momentum(0.1, lr_decay=0.1, decay_step=32000, staircase=True)
    net = tflearn.regression(net, optimizer=opt,
                             loss='categorical_crossentropy')

    model = tflearn.DNN(net,
                        max_checkpoints=0, tensorboard_verbose=0, tensorboard_dir='log')

    return model
Project: pygta5    Author: Sentdex
def resnext(width, height, frame_count, lr, output=9, model_name = 'sentnet_color.model'):
    net = input_data(shape=[None, width, height, 3], name='input')
    net = tflearn.conv_2d(net, 16, 3, regularizer='L2', weight_decay=0.0001)
    n = 5  # blocks per stage; assumed value (`n` was a module-level constant in the original script)
    net = tflearn.layers.conv.resnext_block(net, n, 16, 32)
    net = tflearn.resnext_block(net, 1, 32, 32, downsample=True)
    net = tflearn.resnext_block(net, n-1, 32, 32)
    net = tflearn.resnext_block(net, 1, 64, 32, downsample=True)
    net = tflearn.resnext_block(net, n-1, 64, 32)
    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    net = tflearn.global_avg_pool(net)
    # Regression
    net = tflearn.fully_connected(net, output, activation='softmax')
    opt = tflearn.Momentum(0.1, lr_decay=0.1, decay_step=32000, staircase=True)
    net = tflearn.regression(net, optimizer=opt,
                             loss='categorical_crossentropy')

    model = tflearn.DNN(net,
                        max_checkpoints=0, tensorboard_verbose=0, tensorboard_dir='log')

    return model
Project: identifiera-sarkasm    Author: risnejunior
def create_model(net):
    best_path = os.path.join(temp_dir_best.name, 'checkpoint-best-')
    chkpt_path = os.path.join(temp_dir_checkpoints.name, 'checkpoint-')

    model = tflearn.DNN(
        net,
        tensorboard_verbose=3,
        checkpoint_path=chkpt_path,
        best_checkpoint_path=best_path,
        best_val_accuracy=0.0
    )

    #set embeddings
    if cfg.use_embeddings:
        emb = np.array(pd.embeddings[:pd.vocab_size], dtype=np.float32)
    else:
        emb = np.random.randn(pd.vocab_size, pd.emb_size).astype(np.float32)

    new_emb_t = tf.convert_to_tensor(emb)
    embeddings_tensor = tflearn.variables.get_layer_variables_by_name('embedding')[0]
    model.set_weights( embeddings_tensor, new_emb_t)
    w = model.get_weights(embeddings_tensor)
    debug_log.log(str(w.shape), "embedding layer shape", aslist=False)

    #debug_embeddingd(model, "fresh", embeddings_log)

    return model
Project: DeepOSM    Author: trailbehind
def model_for_type(neural_net_type, tile_size, on_band_count):
    """The neural_net_type can be: one_layer_relu,
                                   one_layer_relu_conv,
                                   two_layer_relu_conv."""
    network = tflearn.input_data(shape=[None, tile_size, tile_size, on_band_count])

    # NN architectures mirror ch. 3 of www.cs.toronto.edu/~vmnih/docs/Mnih_Volodymyr_PhD_Thesis.pdf
    if neural_net_type == 'one_layer_relu':
        network = tflearn.fully_connected(network, 64, activation='relu')
    elif neural_net_type == 'one_layer_relu_conv':
        network = conv_2d(network, 64, 12, strides=4, activation='relu')
        network = max_pool_2d(network, 3)
    elif neural_net_type == 'two_layer_relu_conv':
        network = conv_2d(network, 64, 12, strides=4, activation='relu')
        network = max_pool_2d(network, 3)
        network = conv_2d(network, 128, 4, activation='relu')
    else:
        print("ERROR: exiting, unknown layer type for neural net")

    # classify as road or not road
    softmax = tflearn.fully_connected(network, 2, activation='softmax')

    # hyperparameters based on www.cs.toronto.edu/~vmnih/docs/Mnih_Volodymyr_PhD_Thesis.pdf
    momentum = tflearn.optimizers.Momentum(
        learning_rate=.005, momentum=0.9,
        lr_decay=0.0002, name='Momentum')

    net = tflearn.regression(softmax, optimizer=momentum, loss='categorical_crossentropy')

    return tflearn.DNN(net, tensorboard_verbose=0)
Project: skill-voice-recognition    Author: TREE-Edu
def handle_speaker_rec_test_intent(self, message):
        speakers = data.get_speakers()
        number_classes=len(speakers)
        #print("speakers",speakers)

        #batch=data.wave_batch_generator(batch_size=1000, source=data.Source.DIGIT_WAVES, target=data.Target.speaker)
        #X,Y=next(batch)


        # Classification
        #tflearn.init_graph(num_cores=8, gpu_memory_fraction=0.5)

        net = tflearn.input_data(shape=[None, 8192]) #Two wave chunks
        net = tflearn.fully_connected(net, 64)
        net = tflearn.dropout(net, 0.5)
        net = tflearn.fully_connected(net, number_classes, activation='softmax')
        net = tflearn.regression(net, optimizer='adam', loss='categorical_crossentropy')

        model = tflearn.DNN(net)
        #model.fit(X, Y, n_epoch=100, show_metric=True, snapshot_step=100)

        CWD_PATH = os.path.dirname(__file__)
        path_to_model = os.path.join(CWD_PATH, 'model', 'model.tfl')
        model.load(path_to_model) 

        demo_file = "8_Vicki_260.wav"
        #demo_file = "8_Bruce_260.wav"
        demo=data.load_wav_file(data.path + demo_file)
        result=model.predict([demo])
        result=data.one_hot_to_item(result,speakers)
        if result == "Vicki":
            self.speak("I am confident I'm speaking to %s"%(result)) # ~ 97% correct
        else:
            self.speak("I'm sorry I don't recognize your voice")
Project: tflearn    Author: tflearn
def __init__(self):
        network = tflearn.input_data(shape=[None, 784], name="input")
        network = self.make_core_network(network)
        network = regression(network, optimizer='adam', learning_rate=0.01,
                             loss='categorical_crossentropy', name='target')

        model = tflearn.DNN(network, tensorboard_verbose=0)
        self.model = model
Project: tflearn    Author: tflearn
def __init__(self):
        # Building deep neural network
        network = tflearn.input_data(shape=[None, 784], name="input")
        network = self.make_core_network(network)

        # Regression using SGD with learning rate decay and Top-3 accuracy
        sgd = tflearn.SGD(learning_rate=0.1, lr_decay=0.96, decay_step=1000)
        top_k = tflearn.metrics.Top_k(3)

        network = tflearn.regression(network, optimizer=sgd, metric=top_k,
                                 loss='categorical_crossentropy', name="target")
        model = tflearn.DNN(network, tensorboard_verbose=0)
        self.model = model
Project: tflearn    Author: tflearn
def test_vbs1(self):

        with tf.Graph().as_default():
            # Data loading and preprocessing
            import tflearn.datasets.mnist as mnist
            X, Y, testX, testY = mnist.load_data(one_hot=True)
            X = X.reshape([-1, 28, 28, 1])
            testX = testX.reshape([-1, 28, 28, 1])
            X = X[:20, :, :, :]
            Y = Y[:20, :]
            testX = testX[:10, :, :, :]
            testY = testY[:10, :]

            # Building convolutional network
            network = input_data(shape=[None, 28, 28, 1], name='input')
            network = conv_2d(network, 32, 3, activation='relu', regularizer="L2")
            network = max_pool_2d(network, 2)
            network = local_response_normalization(network)
            network = conv_2d(network, 64, 3, activation='relu', regularizer="L2")
            network = max_pool_2d(network, 2)
            network = local_response_normalization(network)
            network = fully_connected(network, 128, activation='tanh')
            network = dropout(network, 0.8)
            network = fully_connected(network, 256, activation='tanh')
            network = dropout(network, 0.8)
            network = fully_connected(network, 10, activation='softmax')
            network = regression(network, optimizer='adam', learning_rate=0.01,
                                 loss='categorical_crossentropy', name='target')

            # Training
            model = tflearn.DNN(network, tensorboard_verbose=3)
            model.fit({'input': X}, {'target': Y}, n_epoch=1,
                      batch_size=10,
                      validation_set=({'input': testX}, {'target': testY}),
                      validation_batch_size=5,
                      snapshot_step=10, show_metric=True, run_id='convnet_mnist_vbs')

            self.assertEqual(model.train_ops[0].validation_batch_size, 5)
            self.assertEqual(model.train_ops[0].batch_size, 10)
Project: tflearn    Author: tflearn
def test_dnn(self):

        with tf.Graph().as_default():
            X = [3.3,4.4,5.5,6.71,6.93,4.168,9.779,6.182,7.59,2.167,7.042,10.791,5.313,7.997,5.654,9.27,3.1]
            Y = [1.7,2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221,2.827,3.465,1.65,2.904,2.42,2.94,1.3]
            input = tflearn.input_data(shape=[None])
            linear = tflearn.single_unit(input)
            regression = tflearn.regression(linear, optimizer='sgd', loss='mean_square',
                                            metric='R2', learning_rate=0.01)
            m = tflearn.DNN(regression)
            # Testing fit and predict
            m.fit(X, Y, n_epoch=1000, show_metric=True, snapshot_epoch=False)
            res = m.predict([3.2])[0]
            self.assertGreater(res, 1.3, "DNN test (linear regression) failed! with score: " + str(res) + " expected > 1.3")
            self.assertLess(res, 1.8, "DNN test (linear regression) failed! with score: " + str(res) + " expected < 1.8")

            # Testing save method
            m.save("test_dnn.tflearn")
            self.assertTrue(os.path.exists("test_dnn.tflearn.index"))

        with tf.Graph().as_default():
            input = tflearn.input_data(shape=[None])
            linear = tflearn.single_unit(input)
            regression = tflearn.regression(linear, optimizer='sgd', loss='mean_square',
                                            metric='R2', learning_rate=0.01)
            m = tflearn.DNN(regression)

            # Testing load method
            m.load("test_dnn.tflearn")
            res = m.predict([3.2])[0]
            self.assertGreater(res, 1.3, "DNN test (linear regression) failed after loading model! score: " + str(res) + " expected > 1.3")
            self.assertLess(res, 1.8, "DNN test (linear regression) failed after loading model! score: " + str(res) + " expected < 1.8")
Project: tflearn    Author: tflearn
def test_conv_layers(self):

        X = [[0., 0., 0., 0.], [1., 1., 1., 1.], [0., 0., 1., 0.], [1., 1., 1., 0.]]
        Y = [[1., 0.], [0., 1.], [1., 0.], [0., 1.]]

        with tf.Graph().as_default():
            g = tflearn.input_data(shape=[None, 4])
            g = tflearn.reshape(g, new_shape=[-1, 2, 2, 1])
            g = tflearn.conv_2d(g, 4, 2, activation='relu')
            g = tflearn.max_pool_2d(g, 2)
            g = tflearn.fully_connected(g, 2, activation='softmax')
            g = tflearn.regression(g, optimizer='sgd', learning_rate=1.)

            m = tflearn.DNN(g)
            m.fit(X, Y, n_epoch=100, snapshot_epoch=False)
            # TODO: Fix test
            #self.assertGreater(m.predict([[1., 0., 0., 0.]])[0][0], 0.5)

        # Bulk Tests
        with tf.Graph().as_default():
            g = tflearn.input_data(shape=[None, 4])
            g = tflearn.reshape(g, new_shape=[-1, 2, 2, 1])
            g = tflearn.conv_2d(g, 4, 2)
            g = tflearn.conv_2d(g, 4, 1)
            g = tflearn.conv_2d_transpose(g, 4, 2, [2, 2])
            g = tflearn.max_pool_2d(g, 2)
Project: tflearn    Author: tflearn
def test_recurrent_layers(self):

        X = [[1, 3, 5, 7], [2, 4, 8, 10], [1, 5, 9, 11], [2, 6, 8, 0]]
        Y = [[0., 1.], [1., 0.], [0., 1.], [1., 0.]]

        with tf.Graph().as_default():
            g = tflearn.input_data(shape=[None, 4])
            g = tflearn.embedding(g, input_dim=12, output_dim=4)
            g = tflearn.lstm(g, 6)
            g = tflearn.fully_connected(g, 2, activation='softmax')
            g = tflearn.regression(g, optimizer='sgd', learning_rate=1.)

            m = tflearn.DNN(g)
            m.fit(X, Y, n_epoch=300, snapshot_epoch=False)
            self.assertGreater(m.predict([[5, 9, 11, 1]])[0][1], 0.9)
Project: Emotion-detection    Author: atulapra
def build_network(self):
      """
      Build the convnet.
      Input is 48x48
      3072 nodes in fully connected layer
      """ 
      print("\n---> Starting Neural Network \n") 
      self.network = input_data(shape = [None, 48, 48, 1])
      print("Input data",self.network.shape[1:])
      self.network = conv_2d(self.network, 64, 5, activation = 'relu')
      print("Conv1",self.network.shape[1:])
      self.network = max_pool_2d(self.network, 3, strides = 2)
      print("Maxpool",self.network.shape[1:])
      self.network = conv_2d(self.network, 64, 5, activation = 'relu')
      print("Conv2",self.network.shape[1:])
      self.network = max_pool_2d(self.network, 3, strides = 2)
      print("Maxpool2",self.network.shape[1:])
      self.network = conv_2d(self.network, 128, 4, activation = 'relu')
      print("Conv3",self.network.shape[1:])
      self.network = dropout(self.network, 0.3)
      print("Dropout",self.network.shape[1:])
      self.network = fully_connected(self.network, 3072, activation = 'relu')
      print("Fully connected",self.network.shape[1:])
      self.network = fully_connected(self.network, len(self.target_classes), activation = 'softmax')
      print("Output",self.network.shape[1:])
      print('\n')
      self.network = regression(self.network,optimizer = 'momentum',metric = 'accuracy',loss = 'categorical_crossentropy')
      self.model = tflearn.DNN(self.network,checkpoint_path = 'model_1_nimish',max_checkpoints = 1,tensorboard_verbose = 2)
      self.load_model()
Project: deep_portfolio    Author: deependersingla
def load_model_tflearn(look_back, batch_size, asset):
    net = make_network(look_back, batch_size)
    model = tfl.DNN(net, tensorboard_dir="./logs_tb", tensorboard_verbose=0)
    filepath = os.path.split(os.path.realpath(__file__))[0]
    model.load(filepath + "/networks/" + asset + "/lstm3.tflearn")
    return model
Project: EJGo    Author: ejmejm
def load_model(model_name):
    try:
        model = eval(model_name)
        if hasattr(model, "get_network"):
            return tflearn.DNN(model.get_network(), tensorboard_verbose=2, checkpoint_path="checkpoints/{}.tflearn".format(model_name))
        else:
            print("ERROR! model, {}, is not defined or does not contain a \"get_network()\" function".format(model_name))
    except NameError:
        print("ERROR! {} is not a valid module".format(model_name))
Project: PyMLT    Author: didw
def __init__(self, s_date, n_frame):
        self.n_epoch = 20
        prev_bd = int(s_date[:6])-1
        prev_ed = int(s_date[9:15])-1
        if prev_bd%100 == 0: prev_bd -= 98
        if prev_ed%100 == 0: prev_ed -= 98
        pred_s_date = "%d01_%d01" % (prev_bd, prev_ed)
        prev_model = '../model/tflearn/reg_l3_bn/big/%s' % pred_s_date
        self.model_dir = '../model/tflearn/reg_l3_bn/big/%s' % s_date

        tf.reset_default_graph()
        tflearn.init_graph(gpu_memory_fraction=0.1)
        input_layer = tflearn.input_data(shape=[None, 23*n_frame], name='input')
        dense1 = tflearn.fully_connected(input_layer, 400, name='dense1', activation='relu')
        dense1n = tflearn.batch_normalization(dense1, name='BN1')
        dense2 = tflearn.fully_connected(dense1n, 100, name='dense2', activation='relu')
        dense2n = tflearn.batch_normalization(dense2, name='BN2')
        dense3 = tflearn.fully_connected(dense2n, 1, name='dense3')
        output = tflearn.single_unit(dense3)
        regression = tflearn.regression(output, optimizer='adam', loss='mean_square',
                                metric='R2', learning_rate=0.001)
        self.estimators = tflearn.DNN(regression)
        if os.path.exists('%s/model.tfl' % prev_model):
            self.estimators.load('%s/model.tfl' % prev_model)
            self.n_epoch = 10
        if not os.path.exists(self.model_dir):
            os.makedirs(self.model_dir)
Project: rcnn-with-tflearn    Author: Redoblue
def test():
    images = []
    imgs = ['0/image_0001.jpg', '10/image_0801.jpg', '15/image_1201.jpg']
    for im in imgs:
        img = Image.open('17flowers/jpg/' + im)
        img = img.resize((227, 227))
        img = utils.pil2nparray(img)
        images.append(img)
    net = create_alexnet(17)
    model = tflearn.DNN(net)
    model.load('models/pre_train.model')
    preds = model.predict(images)
    results = np.argmax(preds, 1)
    print(results)
Project: rcnn-with-tflearn    Author: Redoblue
def fine_tune():
    print("Starting fine tuning...")
    if not os.path.isfile('data/dataset.pkl'):
        print("Preparing Data...")
        pr.propose_and_write('refine_list.txt', 2)

    print("Loading Data...")
    X, Y = pr.load_from_pkl('data/dataset.pkl')
    print("Loading Done.")

    # whether to restore the last layer
    restore = False
    if os.path.isfile('models/fine_tune.model.index'):
        restore = True
        print("Continue training...")

    net = create_alexnet(3, restore)
    model = tflearn.DNN(net, checkpoint_path='ckps/fine_tune.ckp',
                        max_checkpoints=1, tensorboard_verbose=2, tensorboard_dir='tmp/fine_tune_logs/')

    if os.path.isfile('models/fine_tune.model.index'):
        print("Loading the fine tuned model")
        model.load('models/fine_tune.model')  # load via the checkpoint prefix rather than the .index file
    elif os.path.isfile('models/pre_train.model'):
        print("Loading the alexnet")
        model.load('models/pre_train.model')
    else:
        print("No file to load, error")
        return False
    model.fit(X, Y, n_epoch=10, validation_set=0.1, shuffle=True,
              show_metric=True, batch_size=64, snapshot_step=200,
              snapshot_epoch=False, run_id='fine_tune')  # epoch = 1000
    # Save the model
    model.save('models/fine_tune.model')
Project: rnn-sentiment-analysis    Author: kashizui
def train(args, glove, data, param_file_path):
    if glove is None:
        embedding_size = (utils.NUM_UNIQUE_TOKENS, int(args['--embedding-dims']))
    else:
        embedding_size = glove[0].shape

    print("Loading model definition for %s..." % args['--model'])
    net = models.get_model(args['--model'], embedding_size=embedding_size,
                           train_embedding=args['--train-embedding'],
                           hidden_dims=int(args['--hidden-dims']),
                           learning_rate=float(args['--learning-rate']))
    model = tflearn.DNN(net, clip_gradients=5., tensorboard_verbose=0)

    if args['--evaluate-only'] or args['--continue-training']:
        print("Loading saved parameters from %s" % param_file_path)
        model.load(param_file_path)
    elif glove is not None:
        print("Initializing word embedding...")
        # Retrieve embedding layer weights (only a single weight matrix, so index is 0)
        embedding_weights = tflearn.get_layer_variables_by_name('EmbeddingLayer')[0]
        # Initialize with glove embedding
        model.set_weights(embedding_weights, glove[0])

    if not args['--evaluate-only']:
        print("Training...")
        model.fit(data.trainX, data.trainY,
                  n_epoch=int(args['--epochs']),
                  validation_set=(data.valX, data.valY),
                  show_metric=True, batch_size=128,
                  run_id=os.path.splitext(param_file_path)[0])

        print("Saving parameters to %s" % param_file_path)
        model.save(param_file_path)

    return model
Project: Convolutional-Autoencoder    Author: OliverEdholm
def main():
    X, _ = load_data()

    conv_autencoder = build_model()

    logging.info('training')
    model = tflearn.DNN(conv_autencoder, tensorboard_verbose=3)
    model.fit(X, X, n_epoch=20, shuffle=True, show_metric=True,
              batch_size=BATCH_SIZE, validation_set=0.1, snapshot_epoch=True,
              run_id='selfie_conv_autoencoder',
              checkpoint_path=CHECKPOINTS_PATH)
Project: pygta5    Author: Sentdex
def sentnet_color_2d(width, height, frame_count, lr, output=9, model_name = 'sentnet_color.model'):
    network = input_data(shape=[None, width, height, 3], name='input')
    network = conv_2d(network, 96, 11, strides=4, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 256, 5, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 256, 3, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = conv_2d(network, 256, 5, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 256, 3, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, output, activation='softmax')
    network = regression(network, optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=lr, name='targets')

    model = tflearn.DNN(network,
                        max_checkpoints=0, tensorboard_verbose=0, tensorboard_dir='log')

    return model
Project: pygta5    Author: Sentdex
def sentnet_LSTM_gray(width, height, frame_count, lr, output=9):
    network = input_data(shape=[None, width, height], name='input')
    #network = tflearn.input_data(shape=[None, 28, 28], name='input')
    network = tflearn.lstm(network, 128, return_seq=True)
    network = tflearn.lstm(network, 128)
    network = tflearn.fully_connected(network, 9, activation='softmax')
    network = tflearn.regression(network, optimizer='adam',
    loss='categorical_crossentropy', name="output1")

    model = tflearn.DNN(network, checkpoint_path='model_lstm',
                        max_checkpoints=1, tensorboard_verbose=0, tensorboard_dir='log')

    return model
Project: pygta5    Author: Sentdex
def sentnet_color(width, height, frame_count, lr, output=9, model_name = 'sentnet_color.model'):
    network = input_data(shape=[None, width, height,3, 1], name='input')
    network = conv_3d(network, 96, 11, strides=4, activation='relu')
    network = max_pool_3d(network, 3, strides=2)
    #network = local_response_normalization(network)
    network = conv_3d(network, 256, 5, activation='relu')
    network = max_pool_3d(network, 3, strides=2)
    #network = local_response_normalization(network)
    network = conv_3d(network, 384, 3, activation='relu')
    network = conv_3d(network, 384, 3, activation='relu')
    network = conv_3d(network, 256, 3, activation='relu')
    network = max_pool_3d(network, 3, strides=2)
    network = conv_3d(network, 256, 5, activation='relu')
    network = max_pool_3d(network, 3, strides=2)
    #network = local_response_normalization(network)
    network = conv_3d(network, 384, 3, activation='relu')
    network = conv_3d(network, 384, 3, activation='relu')
    network = conv_3d(network, 256, 3, activation='relu')
    network = max_pool_3d(network, 3, strides=2)
    #network = local_response_normalization(network)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, output, activation='softmax')
    network = regression(network, optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=lr, name='targets')

    model = tflearn.DNN(network, checkpoint_path=model_name,
                        max_checkpoints=1, tensorboard_verbose=0, tensorboard_dir='log')

    return model
Project: pygta5    Author: Sentdex
def sentnet_frames(width, height, frame_count, lr, output=9):
    network = input_data(shape=[None, width, height,frame_count, 1], name='input')
    network = conv_3d(network, 96, 11, strides=4, activation='relu')
    network = max_pool_3d(network, 3, strides=2)
    #network = local_response_normalization(network)
    network = conv_3d(network, 256, 5, activation='relu')
    network = max_pool_3d(network, 3, strides=2)
    #network = local_response_normalization(network)
    network = conv_3d(network, 384, 3, activation='relu')
    network = conv_3d(network, 384, 3, activation='relu')
    network = conv_3d(network, 256, 3, activation='relu')
    network = max_pool_3d(network, 3, strides=2)
    network = conv_3d(network, 256, 5, activation='relu')
    network = max_pool_3d(network, 3, strides=2)
    #network = local_response_normalization(network)
    network = conv_3d(network, 384, 3, activation='relu')
    network = conv_3d(network, 384, 3, activation='relu')
    network = conv_3d(network, 256, 3, activation='relu')
    network = max_pool_3d(network, 3, strides=2)
    #network = local_response_normalization(network)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, output, activation='softmax')
    network = regression(network, optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=lr, name='targets')

    model = tflearn.DNN(network, checkpoint_path='model_alexnet',
                        max_checkpoints=1, tensorboard_verbose=0, tensorboard_dir='log')

    return model
Project: pygta5    Author: Sentdex
def sentnet(width, height, frame_count, lr, output=9):
    network = input_data(shape=[None, width, height, frame_count, 1], name='input')
    network = conv_3d(network, 96, 11, strides=4, activation='relu')
    network = avg_pool_3d(network, 3, strides=2)
    #network = local_response_normalization(network)
    network = conv_3d(network, 256, 5, activation='relu')
    network = avg_pool_3d(network, 3, strides=2)
    #network = local_response_normalization(network)
    network = conv_3d(network, 384, 3, activation='relu')
    network = conv_3d(network, 384, 3, activation='relu')
    network = conv_3d(network, 256, 3, activation='relu')
    network = max_pool_3d(network, 3, strides=2)
    network = conv_3d(network, 256, 5, activation='relu')
    network = avg_pool_3d(network, 3, strides=2)
    #network = local_response_normalization(network)
    network = conv_3d(network, 384, 3, activation='relu')
    network = conv_3d(network, 384, 3, activation='relu')
    network = conv_3d(network, 256, 3, activation='relu')
    network = avg_pool_3d(network, 3, strides=2)
    #network = local_response_normalization(network)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, output, activation='softmax')
    network = regression(network, optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=lr, name='targets')

    model = tflearn.DNN(network, checkpoint_path='model_alexnet',
                        max_checkpoints=1, tensorboard_verbose=0, tensorboard_dir='log')

    return model
Project: pygta5    Author: Sentdex
def alexnet2(width, height, lr, output=3):
    network = input_data(shape=[None, width, height, 1], name='input')
    network = conv_2d(network, 96, 11, strides=4, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 256, 5, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 256, 3, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = conv_2d(network, 256, 5, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 256, 3, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, output, activation='softmax')
    network = regression(network, optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=lr, name='targets')

    model = tflearn.DNN(network, checkpoint_path='model_alexnet',
                        max_checkpoints=1, tensorboard_verbose=0, tensorboard_dir='log')

    return model
Project: pygta5    Author: Sentdex
def sentnet_v0(width, height, frame_count, lr, output=9):
    network = input_data(shape=[None, width, height, frame_count, 1], name='input')
    network = conv_3d(network, 96, 11, strides=4, activation='relu')
    network = max_pool_3d(network, 3, strides=2)

    #network = local_response_normalization(network)

    network = conv_3d(network, 256, 5, activation='relu')
    network = max_pool_3d(network, 3, strides=2)

    #network = local_response_normalization(network)

    network = conv_3d(network, 384, 3, 3, activation='relu')
    network = conv_3d(network, 384, 3, 3, activation='relu')
    network = conv_3d(network, 256, 3, 3, activation='relu')

    network = max_pool_3d(network, 3, strides=2)

    #network = local_response_normalization(network)

    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, output, activation='softmax')
    network = regression(network, optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=lr, name='targets')

    model = tflearn.DNN(network, checkpoint_path='model_alexnet',
                        max_checkpoints=1, tensorboard_verbose=0, tensorboard_dir='log')

    return model
Project: pygta5    Author: Sentdex
def alexnet(width, height, lr, output=3):
    network = input_data(shape=[None, width, height, 1], name='input')
    network = conv_2d(network, 96, 11, strides=4, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 256, 5, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 256, 3, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, output, activation='softmax')
    network = regression(network, optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=lr, name='targets')

    model = tflearn.DNN(network, checkpoint_path='model_alexnet',
                        max_checkpoints=1, tensorboard_verbose=0, tensorboard_dir='log')

    return model
Project: pygta5    Author: Sentdex
def sentnet_LSTM_gray(width, height, frame_count, lr, output=9):
    network = input_data(shape=[None, width, height], name='input')
    #network = tflearn.input_data(shape=[None, 28, 28], name='input')
    network = tflearn.lstm(network, 128, return_seq=True)
    network = tflearn.lstm(network, 128)
    network = tflearn.fully_connected(network, 9, activation='softmax')
    network = tflearn.regression(network, optimizer='adam',
    loss='categorical_crossentropy', name="output1")

    model = tflearn.DNN(network, checkpoint_path='model_lstm',
                        max_checkpoints=1, tensorboard_verbose=0, tensorboard_dir='log')

    return model
Project: pygta5    Author: Sentdex
def sentnet_color(width, height, frame_count, lr, output=9, model_name = 'sentnet_color.model'):
    network = input_data(shape=[None, width, height,3, 1], name='input')
    network = conv_3d(network, 96, 11, strides=4, activation='relu')
    network = max_pool_3d(network, 3, strides=2)
    #network = local_response_normalization(network)
    network = conv_3d(network, 256, 5, activation='relu')
    network = max_pool_3d(network, 3, strides=2)
    #network = local_response_normalization(network)
    network = conv_3d(network, 384, 3, activation='relu')
    network = conv_3d(network, 384, 3, activation='relu')
    network = conv_3d(network, 256, 3, activation='relu')
    network = max_pool_3d(network, 3, strides=2)
    network = conv_3d(network, 256, 5, activation='relu')
    network = max_pool_3d(network, 3, strides=2)
    #network = local_response_normalization(network)
    network = conv_3d(network, 384, 3, activation='relu')
    network = conv_3d(network, 384, 3, activation='relu')
    network = conv_3d(network, 256, 3, activation='relu')
    network = max_pool_3d(network, 3, strides=2)
    #network = local_response_normalization(network)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, output, activation='softmax')
    network = regression(network, optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=lr, name='targets')

    model = tflearn.DNN(network, checkpoint_path=model_name,
                        max_checkpoints=1, tensorboard_verbose=0, tensorboard_dir='log')

    return model
Project: pygta5    Author: Sentdex
def sentnet_frames(width, height, frame_count, lr, output=9):
    network = input_data(shape=[None, width, height,frame_count, 1], name='input')
    network = conv_3d(network, 96, 11, strides=4, activation='relu')
    network = max_pool_3d(network, 3, strides=2)
    #network = local_response_normalization(network)
    network = conv_3d(network, 256, 5, activation='relu')
    network = max_pool_3d(network, 3, strides=2)
    #network = local_response_normalization(network)
    network = conv_3d(network, 384, 3, activation='relu')
    network = conv_3d(network, 384, 3, activation='relu')
    network = conv_3d(network, 256, 3, activation='relu')
    network = max_pool_3d(network, 3, strides=2)
    network = conv_3d(network, 256, 5, activation='relu')
    network = max_pool_3d(network, 3, strides=2)
    #network = local_response_normalization(network)
    network = conv_3d(network, 384, 3, activation='relu')
    network = conv_3d(network, 384, 3, activation='relu')
    network = conv_3d(network, 256, 3, activation='relu')
    network = max_pool_3d(network, 3, strides=2)
    #network = local_response_normalization(network)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, output, activation='softmax')
    network = regression(network, optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=lr, name='targets')

    model = tflearn.DNN(network, checkpoint_path='model_alexnet',
                        max_checkpoints=1, tensorboard_verbose=0, tensorboard_dir='log')

    return model
Project: pygta5    Author: Sentdex
def sentnet2(width, height, frame_count, lr, output=9):
    network = input_data(shape=[None, width, height, frame_count, 1], name='input')
    network = conv_3d(network, 96, 11, strides=4, activation='relu')
    network = max_pool_3d(network, 3, strides=2)
    #network = local_response_normalization(network)
    network = conv_3d(network, 256, 5, activation='relu')
    network = max_pool_3d(network, 3, strides=2)
    #network = local_response_normalization(network)
    network = conv_3d(network, 384, 3, activation='relu')
    network = conv_3d(network, 384, 3, activation='relu')
    network = conv_3d(network, 256, 3, activation='relu')
    network = max_pool_3d(network, 3, strides=2)
    #network = local_response_normalization(network)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 3, activation='softmax')
    network = regression(network, optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=lr, name='targets')

    model = tflearn.DNN(network, checkpoint_path='model_alexnet',
                        max_checkpoints=1, tensorboard_verbose=0, tensorboard_dir='log')

    return model