We extracted the following 6 code examples from open-source Python projects to illustrate how to use tflearn.Momentum().
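For orientation before the extracted project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) showing where tflearn.Momentum() plugs into a tflearn graph. The tiny two-layer network, layer sizes, and hyperparameter values are illustrative assumptions; only the Momentum constructor arguments (learning_rate, momentum, lr_decay, decay_step, staircase) and the regression/DNN calls mirror the actual tflearn API.

import tflearn

# Toy network: 2 input features, one hidden layer, 2-way softmax (illustrative only).
net = tflearn.input_data(shape=[None, 2])
net = tflearn.fully_connected(net, 8, activation='relu')
net = tflearn.fully_connected(net, 2, activation='softmax')

# SGD with momentum plus stepped learning-rate decay: with staircase=True the
# learning rate is multiplied by lr_decay once every decay_step training steps.
momentum = tflearn.Momentum(learning_rate=0.1, momentum=0.9,
                            lr_decay=0.96, decay_step=100, staircase=True)

# The optimizer instance is passed to the regression layer, not to DNN directly.
net = tflearn.regression(net, optimizer=momentum, loss='categorical_crossentropy')
model = tflearn.DNN(net)
# model.fit(X, Y, n_epoch=10)  # X: float array of shape [N, 2], Y: one-hot labels [N, 2]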
import tflearn
from tflearn.layers.core import input_data

# ResNeXt classifier for (width x height) RGB frames.
# Note: `n` (the number of ResNeXt blocks per stage) is defined at module level
# in the source project and is not shown in this snippet.
def resnext(width, height, frame_count, lr, output=9, model_name='sentnet_color.model'):
    net = input_data(shape=[None, width, height, 3], name='input')
    net = tflearn.conv_2d(net, 16, 3, regularizer='L2', weight_decay=0.0001)
    net = tflearn.resnext_block(net, n, 16, 32)
    net = tflearn.resnext_block(net, 1, 32, 32, downsample=True)
    net = tflearn.resnext_block(net, n - 1, 32, 32)
    net = tflearn.resnext_block(net, 1, 64, 32, downsample=True)
    net = tflearn.resnext_block(net, n - 1, 64, 32)
    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    net = tflearn.global_avg_pool(net)

    # Regression
    net = tflearn.fully_connected(net, output, activation='softmax')
    # Learning rate is hard-coded to 0.1 here; the `lr` argument is not used.
    opt = tflearn.Momentum(0.1, lr_decay=0.1, decay_step=32000, staircase=True)
    net = tflearn.regression(net, optimizer=opt, loss='categorical_crossentropy')

    model = tflearn.DNN(net, max_checkpoints=0, tensorboard_verbose=0, tensorboard_dir='log')
    return model
import cv2
import tflearn
from tflearn.data_preprocessing import ImagePreprocessing

# Extracted from a class; relies on self.n, self.model and self.process_image
# defined elsewhere in the source project.
def run(self):
    # Real-time pre-processing of the image data
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()

    # Real-time data augmentation
    img_aug = tflearn.ImageAugmentation()
    img_aug.add_random_flip_leftright()
    # img_aug.add_random_crop([48, 48], padding=8)

    # Building Residual Network
    net = tflearn.input_data(shape=[None, 48, 48, 1],
                             data_preprocessing=img_prep,
                             data_augmentation=img_aug)
    net = tflearn.conv_2d(net, nb_filter=16, filter_size=3, regularizer='L2', weight_decay=0.0001)
    net = tflearn.residual_block(net, self.n, 16)
    net = tflearn.residual_block(net, 1, 32, downsample=True)
    net = tflearn.residual_block(net, self.n - 1, 32)
    net = tflearn.residual_block(net, 1, 64, downsample=True)
    net = tflearn.residual_block(net, self.n - 1, 64)
    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    net = tflearn.global_avg_pool(net)

    # Regression
    net = tflearn.fully_connected(net, 7, activation='softmax')
    mom = tflearn.Momentum(learning_rate=0.1, lr_decay=0.0001, decay_step=32000,
                           staircase=True, momentum=0.9)
    net = tflearn.regression(net, optimizer=mom, loss='categorical_crossentropy')

    self.model = tflearn.DNN(net, checkpoint_path='models/model_resnet_emotion',
                             max_checkpoints=10, tensorboard_verbose=0, clip_gradients=0.)
    self.model.load('current_model/model_resnet_emotion-42000')

    # Detect faces with OpenCV and feed each face region to the network.
    face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    cap = cv2.VideoCapture(0)
    while True:
        ret, img = cap.read()
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, 1.3, 5)
        for (x, y, w, h) in faces:
            cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
            roi_gray = gray[y:y + h, x:x + w]
            roi_color = img[y:y + h, x:x + w]
            self.process_image(roi_gray, img)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
import tflearn

def get_model(model_name):
    # First we load the network
    print("Setting up neural networks...")
    n = 18

    # Real-time data preprocessing
    print("Doing preprocessing...")
    img_prep = tflearn.ImagePreprocessing()
    img_prep.add_featurewise_zero_center(per_channel=True,
                                         mean=[0.573364, 0.44924123, 0.39455055])

    # Real-time data augmentation
    print("Building augmentation...")
    img_aug = tflearn.ImageAugmentation()
    img_aug.add_random_flip_leftright()
    img_aug.add_random_crop([32, 32], padding=4)

    # Build the model (for 32 x 32)
    print("Shaping input data...")
    net = tflearn.input_data(shape=[None, 32, 32, 3],
                             data_preprocessing=img_prep,
                             data_augmentation=img_aug)
    net = tflearn.conv_2d(net, 16, 3, regularizer='L2', weight_decay=0.0001)

    print("Carving Resnext blocks...")
    net = tflearn.resnext_block(net, n, 16, 32)
    net = tflearn.resnext_block(net, 1, 32, 32, downsample=True)
    net = tflearn.resnext_block(net, n - 1, 32, 32)
    net = tflearn.resnext_block(net, 1, 64, 32, downsample=True)
    net = tflearn.resnext_block(net, n - 1, 64, 32)

    print("Eroding Gradient...")
    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    net = tflearn.global_avg_pool(net)
    net = tflearn.fully_connected(net, 8, activation='softmax')
    opt = tflearn.Momentum(0.1, lr_decay=0.1, decay_step=32000, staircase=True)
    net = tflearn.regression(net, optimizer=opt, loss='categorical_crossentropy')

    print("Structuring model...")
    model = tflearn.DNN(net, tensorboard_verbose=0, clip_gradients=0.)

    # Load the model from checkpoint
    print("Loading the model...")
    model.load(model_name)

    return model