The following 48 code examples, extracted from open-source Python projects, illustrate how to use keras.callbacks.ReduceLROnPlateau().
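Before the extracted examples, here is a minimal, self-contained sketch of the typical pattern. The model and data are placeholders invented for illustration, not taken from any of the projects below:

import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.callbacks import ReduceLROnPlateau

# Toy data and model, purely illustrative.
x = np.random.rand(100, 8)
y = np.random.randint(0, 2, size=(100, 1))

model = Sequential([Dense(16, activation='relu', input_dim=8),
                    Dense(1, activation='sigmoid')])
model.compile(optimizer='sgd', loss='binary_crossentropy')

# Halve the learning rate when val_loss has not improved for 3 epochs,
# but never go below 1e-5.
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5,
                              patience=3, min_lr=1e-5, verbose=1)

model.fit(x, y, validation_split=0.2, epochs=20, callbacks=[reduce_lr])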
def callbacks(self):
    """
    :return:
    """
    cbs = []

    tb = TensorBoard(log_dir=self.log_dir, write_graph=True, write_images=True)
    cbs.append(tb)

    best_model_filename = self.model_name + '_best.h5'
    best_model = os.path.join(self.checkpoint_dir, best_model_filename)
    save_best = ModelCheckpoint(best_model, save_best_only=True)
    cbs.append(save_best)

    checkpoints = ModelCheckpoint(filepath=self.checkpoint_file, verbose=1)
    cbs.append(checkpoints)

    # Reduce the learning rate when val_loss plateaus for one epoch.
    reduce_lr = ReduceLROnPlateau(patience=1, verbose=1)
    cbs.append(reduce_lr)

    return cbs
def fit(self, train_X, val_X, nb_epoch=50, batch_size=100):
    print('Training variational autoencoder')
    optimizer = Adadelta(lr=2.)
    self.vae.compile(optimizer=optimizer, loss=self.vae_loss)

    self.vae.fit(train_X[0], train_X[1],
                 shuffle=True,
                 epochs=nb_epoch,
                 batch_size=batch_size,
                 validation_data=(val_X[0], val_X[1]),
                 callbacks=[ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=3, min_lr=0.01),
                            EarlyStopping(monitor='val_loss', min_delta=1e-5, patience=5, verbose=1, mode='auto'),
                            CustomModelCheckpoint(self.encoder, self.save_model, monitor='val_loss',
                                                  save_best_only=True, mode='auto')])
    return self
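CustomModelCheckpoint above is project-specific and its definition is not shown in the snippet. A plausible minimal sketch of such a callback (names and behavior are assumptions, not taken from the project) is one that tracks the monitored value itself and saves a companion model such as the encoder:

import numpy as np
from keras.callbacks import Callback

class CustomModelCheckpoint(Callback):
    """Hypothetical sketch: save a companion model (e.g. an encoder)
    whenever the monitored quantity improves."""
    def __init__(self, model_to_save, filepath, monitor='val_loss',
                 save_best_only=True, mode='auto'):
        super(CustomModelCheckpoint, self).__init__()
        self.model_to_save = model_to_save
        self.filepath = filepath
        self.monitor = monitor
        self.save_best_only = save_best_only
        # `mode` is accepted for signature parity; this sketch simply
        # assumes a loss-like metric that should decrease.
        self.best = np.inf

    def on_epoch_end(self, epoch, logs=None):
        current = (logs or {}).get(self.monitor)
        if current is None:
            return
        if not self.save_best_only or current < self.best:
            self.best = min(self.best, current)
            self.model_to_save.save(self.filepath)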
def runner(model, epochs):
    initial_LR = 0.001

    if not use_multiscale and not use_multicrop:
        training_gen, val_gen = DataGen()
    else:
        training_gen, val_gen = ms_traingen(), ms_valgen()

    model.compile(optimizer=SGD(initial_LR, momentum=0.9, nesterov=True),
                  loss='binary_crossentropy')

    val_checkpoint = ModelCheckpoint('bestval.h5', 'val_loss', 1, True)
    cur_checkpoint = ModelCheckpoint('current.h5')

    # def lrForEpoch(i): return initial_LR
    lrScheduler = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=2,
                                    cooldown=1, verbose=1)
    print('Model compiled.')

    try:
        model.fit_generator(training_gen, samples_per_epoch, epochs,
                            verbose=1, validation_data=val_gen,
                            nb_val_samples=nb_val_samples,
                            callbacks=[val_checkpoint, cur_checkpoint, lrScheduler])
    except Exception as e:
        print(e)
    finally:
        fname = dumper(model, 'cnn')
        print('Model saved to disk at {}'.format(fname))
        return model
def train(self, model, saveto_path=''):
    x_train, y_train = get_data(self.train_data_path, "train", "frame", self.feature_type)
    print('%d training frame level samples.' % len(x_train))

    x_valid, y_valid = get_data(self.valid_data_path, "valid", "frame", self.feature_type)
    print('%d validation frame level samples.' % len(x_valid))

    sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])

    callbacks = list()
    callbacks.append(CSVLogger(LOG_FILE))
    callbacks.append(ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=2, min_lr=0.0001))
    if saveto_path:
        callbacks.append(ModelCheckpoint(filepath=MODEL_WEIGHTS, verbose=1))

    model.fit(x_train, y_train, epochs=5, callbacks=callbacks,
              validation_data=(x_valid, y_valid))

    # Save the weights on completion.
    if saveto_path:
        model.save_weights(saveto_path)
def build_keras_fit_callbacks(model_path):
    return [
        callbacks.EarlyStopping(
            monitor='val_loss',
            patience=20
            # verbose=1
        ),
        callbacks.ModelCheckpoint(
            model_path,
            monitor='val_loss',
            save_best_only=True,
            save_weights_only=True,
            verbose=0
        ),
        callbacks.ReduceLROnPlateau(
            monitor='val_loss',
            min_lr=1e-7,
            factor=0.2,
            verbose=0
        )
    ]
def build_keras_fit_callbacks(model_path):
    from keras import callbacks
    return [
        callbacks.EarlyStopping(
            monitor='val_loss',
            patience=20
            # verbose=1
        ),
        callbacks.ModelCheckpoint(
            model_path,
            monitor='val_loss',
            save_best_only=True,
            save_weights_only=True,
            verbose=0
        ),
        callbacks.ReduceLROnPlateau(
            monitor='val_loss',
            min_lr=1e-7,
            factor=0.2,
            verbose=0
        )
    ]
def keras_fit_callbacks(model_path):
    from keras import callbacks
    return [
        callbacks.EarlyStopping(
            monitor='val_loss',
            patience=20
            # verbose=1
        ),
        callbacks.ModelCheckpoint(
            model_path,
            monitor='val_loss',
            save_best_only=True,
            save_weights_only=True,
            verbose=0
        ),
        callbacks.ReduceLROnPlateau(
            monitor='val_loss',
            min_lr=1e-7,
            factor=0.2,
            verbose=1
        )
    ]
def main():
    args = get_arguments()
    np.random.seed(args.random_seed)

    from molecules.model import MoleculeVAE
    from molecules.utils import one_hot_array, one_hot_index, from_one_hot_array, \
        decode_smiles_from_indexes, load_dataset
    from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau

    data_train, data_test, charset = load_dataset(args.data)
    model = MoleculeVAE()
    if os.path.isfile(args.model):
        model.load(charset, args.model, latent_rep_size=args.latent_dim)
    else:
        model.create(charset, latent_rep_size=args.latent_dim)

    checkpointer = ModelCheckpoint(filepath=args.model,
                                   verbose=1,
                                   save_best_only=True)

    reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                  factor=0.2,
                                  patience=3,
                                  min_lr=0.0001)

    model.autoencoder.fit(
        data_train,
        data_train,
        shuffle=True,
        nb_epoch=args.epochs,
        batch_size=args.batch_size,
        callbacks=[checkpointer, reduce_lr],
        validation_data=(data_test, data_test)
    )
def test_ReduceLROnPlateau():
    (X_train, y_train), (X_test, y_test) = get_test_data(nb_train=train_samples,
                                                         nb_test=test_samples,
                                                         input_shape=(input_dim,),
                                                         classification=True,
                                                         nb_class=nb_class)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)

    def make_model():
        np.random.seed(1337)
        model = Sequential()
        model.add(Dense(nb_hidden, input_dim=input_dim, activation='relu'))
        model.add(Dense(nb_class, activation='softmax'))
        model.compile(loss='categorical_crossentropy',
                      optimizer=optimizers.SGD(lr=0.1),
                      metrics=['accuracy'])
        return model

    model = make_model()
    # This should reduce the LR after the first epoch (due to high epsilon).
    cbks = [callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1,
                                        epsilon=10, patience=1, cooldown=5)]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks,
              nb_epoch=5, verbose=2)
    assert np.allclose(float(K.get_value(model.optimizer.lr)), 0.01, atol=K.epsilon())

    model = make_model()
    cbks = [callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1,
                                        epsilon=0, patience=1, cooldown=5)]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks,
              nb_epoch=5, verbose=2)
    assert np.allclose(float(K.get_value(model.optimizer.lr)), 0.1, atol=K.epsilon())
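The epsilon argument used in this test comes from older Keras releases; in later Keras versions it was renamed min_delta (the threshold below which a change in the monitored value does not count as an improvement). The equivalent modern construction would be:

from keras.callbacks import ReduceLROnPlateau

# `min_delta` replaces the older `epsilon` argument in recent Keras versions.
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1,
                              min_delta=10, patience=1, cooldown=5)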
def test_TensorBoard_with_ReduceLROnPlateau():
    import shutil
    filepath = './logs'
    (X_train, y_train), (X_test, y_test) = get_test_data(nb_train=train_samples,
                                                         nb_test=test_samples,
                                                         input_shape=(input_dim,),
                                                         classification=True,
                                                         nb_class=nb_class)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)

    model = Sequential()
    model.add(Dense(nb_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(nb_class, activation='softmax'))
    model.compile(loss='binary_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])

    cbks = [
        callbacks.ReduceLROnPlateau(
            monitor='val_loss',
            factor=0.5,
            patience=4,
            verbose=1),
        callbacks.TensorBoard(
            log_dir=filepath)]

    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, nb_epoch=2)

    assert os.path.exists(filepath)
    shutil.rmtree(filepath)
def get_callbacks(experiment_dir, checkpoint_monitor='val_acc'):
    callbacks = []

    # save model checkpoints
    filepath = os.path.join(experiment_dir,
                            'checkpoints',
                            'checkpoint-epoch_{epoch:03d}-val_acc_{val_acc:.3f}.hdf5')

    callbacks.append(ModelCheckpoint(filepath,
                                     monitor=checkpoint_monitor,
                                     verbose=1,
                                     save_best_only=False,
                                     mode='max'))

    callbacks.append(ReduceLROnPlateau(monitor='val_loss',
                                       factor=0.5,
                                       patience=3,
                                       verbose=1,
                                       mode='auto',
                                       epsilon=0.0001,
                                       cooldown=0,
                                       min_lr=0))

    callbacks.append(TensorBoard(log_dir=os.path.join(experiment_dir, 'tensorboard-logs'),
                                 histogram_freq=0,
                                 write_graph=True,
                                 write_images=False))

    return callbacks
def __init__(
        self,
        max_length=10,
        latent_size=20,
        learning_rate=0.0001,
):
    self.max_length = max_length
    self.learning_rate = learning_rate
    self.latent_size = latent_size

    self.custom_objects = {}
    self.model, self.encoder = self.create_model()
    self.best_model_name = self.__class__.__name__ + '_best'
    self.reduce_lr = ReduceLROnPlateau(
        monitor='val_loss',
        verbose=1,
        factor=0.3,
        patience=3,
        cooldown=3,
        min_lr=1e-6,
    )
    self.early_stopping = EarlyStopping(
        monitor='val_loss',
        patience=3,
        verbose=1,
    )
    self.model_cp = ModelCheckpoint(
        self.best_model_name,
        monitor='val_loss',
        verbose=1,
        save_best_only=True,
    )
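For intuition about the parameters used above: when the monitored value fails to improve for patience epochs, the callback multiplies the learning rate by factor (floored at min_lr), then waits cooldown epochs before its patience counter resumes. A rough sketch of the update rule (illustrative only, not the actual Keras source):

# Illustrative only: the effective update ReduceLROnPlateau applies
# once a plateau is detected.
old_lr = 1e-4
factor, min_lr = 0.3, 1e-6
new_lr = max(old_lr * factor, min_lr)  # 3e-5 here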
def __init__(self, epochs=100, verbose=1, limit_data=False, name='default_name',
             evoluation_time=1, clean=True, dataset_type='cifar10',
             max_pooling_cnt=0, debug=False):
    # for all model:
    self.dataset_type = dataset_type
    self.limit_data = limit_data
    if dataset_type == 'cifar10' or dataset_type == 'svhn' or dataset_type == 'cifar100':
        self.input_shape = (32, 32, 3)
    else:
        self.input_shape = (28, 28, 1)
    if dataset_type == 'cifar100':
        self.nb_class = 100
    else:
        self.nb_class = 10
    self.dataset = None
    if limit_data:
        self.load_data(9999, type=self.dataset_type)
    else:
        self.load_data(1, type=self.dataset_type)

    # for ga:
    self.evoluation_time = evoluation_time

    # for single model
    self.set_name(name, clean=clean)
    self.batch_size = 256
    self.epochs = epochs
    self.verbose = verbose
    self.lr_reducer = ReduceLROnPlateau(monitor='val_loss', factor=np.sqrt(0.1),
                                        cooldown=0, patience=10, min_lr=0.5e-7)
    self.early_stopper = EarlyStopping(monitor='val_acc', min_delta=0.001, patience=10)
    self.csv_logger = None
    self.set_logger_path(self.name + '.csv')

    self.debug = debug
    self.max_pooling_limit = int(log(min(self.input_shape[0], self.input_shape[1]), 2)) - 2
    self.max_pooling_cnt = max_pooling_cnt

    self.model_max_conv_width = 1024
    self.model_min_conv_width = 128
    self.model_max_depth = 20
    self.kernel_regularizer_l2 = 0.01
def train(self, model, saveto_path=''):
    x_train, y_train = get_data(self.train_data_path, "train", "frame", self.feature_type)
    print('%d training frame level samples.' % len(x_train))

    x_valid, y_valid = get_data(self.valid_data_path, "valid", "frame", self.feature_type)
    print('%d validation frame level samples.' % len(x_valid))

    sgd = SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='binary_crossentropy', optimizer=sgd, metrics=['accuracy'])

    callbacks = list()
    callbacks.append(CSVLogger(LOG_FILE))
    callbacks.append(ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=2, min_lr=0.0001))
    if saveto_path:
        callbacks.append(ModelCheckpoint(filepath=saveto_path, verbose=1))

    model.fit(x_train, y_train, nb_epoch=5, callbacks=callbacks,
              validation_data=(x_valid, y_valid))

    # Save the weights on completion.
    if saveto_path:
        model.save_weights(saveto_path)
def train(self, model, saveto_path=''):
    x_train, y_train = get_data(self.train_data_path, "train", "video", self.feature_type)
    print('%d training video level samples.' % len(x_train))

    x_valid, y_valid = get_data(self.valid_data_path, "valid", "video", self.feature_type)
    print('%d validation video level samples.' % len(x_valid))

    sgd = SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])

    callbacks = list()
    callbacks.append(CSVLogger(LOG_FILE))
    callbacks.append(ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=2, min_lr=0.0001))
    if saveto_path:
        callbacks.append(ModelCheckpoint(filepath=VID_MODEL_WEIGHTS, verbose=1))

    model.fit(x_train, y_train, epochs=5, callbacks=callbacks,
              validation_data=(x_valid, y_valid))

    # Save the weights on completion.
    if saveto_path:
        model.save_weights(saveto_path)
def train(self, model, saveto_path=''):
    x_train, y_train = get_data(self.train_data_path, "train", "video", self.feature_type)
    print('%d training video level samples.' % len(x_train))

    x_valid, y_valid = get_data(self.valid_data_path, "valid", "video", self.feature_type)
    print('%d validation video level samples.' % len(x_valid))

    sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])

    callbacks = list()
    callbacks.append(CSVLogger(LOG_FILE))
    callbacks.append(ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=2, min_lr=0.0001))
    if saveto_path:
        callbacks.append(ModelCheckpoint(filepath=MODEL_WEIGHTS, verbose=1))

    model.fit(x_train, y_train, epochs=5, callbacks=callbacks,
              validation_data=(x_valid, y_valid))

    # Save the weights on completion.
    if saveto_path:
        model.save_weights(saveto_path)
def get_model_resnet(self):
    try:
        keras.backend.tensorflow_backend.clear_session()
        self.lr_reducer = ReduceLROnPlateau(monitor='val_loss', factor=np.sqrt(0.1),
                                            cooldown=0, patience=5, min_lr=0.5e-6)
        self.early_stopper = EarlyStopping(monitor='val_acc', min_delta=0.001, patience=10)
        self.csv_logger = CSVLogger('resnet.csv')

        num_classes = self.netconf["config"]["num_classes"]
        numoutputs = self.netconf["config"]["layeroutputs"]
        x_size = self.dataconf["preprocess"]["x_size"]
        y_size = self.dataconf["preprocess"]["y_size"]
        channel = self.dataconf["preprocess"]["channel"]
        optimizer = self.netconf["config"]["optimizer"]

        filelist = os.listdir(self.model_path)
        filelist.sort(reverse=True)
        last_chk_path = self.model_path + "/" + self.load_batch + self.file_end

        try:
            self.model = keras.models.load_model(last_chk_path)
            logging.info("Train Restored checkpoint from:" + last_chk_path)
        except Exception as e:
            if numoutputs == 18:
                self.model = resnet.ResnetBuilder.build_resnet_18((channel, x_size, y_size), num_classes)
            elif numoutputs == 34:
                self.model = resnet.ResnetBuilder.build_resnet_34((channel, x_size, y_size), num_classes)
            elif numoutputs == 50:
                self.model = resnet.ResnetBuilder.build_resnet_50((channel, x_size, y_size), num_classes)
            elif numoutputs == 101:
                self.model = resnet.ResnetBuilder.build_resnet_101((channel, x_size, y_size), num_classes)
            elif numoutputs == 152:
                self.model = resnet.ResnetBuilder.build_resnet_152((channel, x_size, y_size), num_classes)
            elif numoutputs == 200:
                self.model = resnet.ResnetBuilder.build_resnet_200((channel, x_size, y_size), num_classes)
            logging.info("None to restore checkpoint. Initializing variables instead." + last_chk_path)
            logging.info(e)

        self.model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
    except Exception as e:
        logging.error("===Error on Residualnet build model : {0}".format(e))

####################################################################################################################
def train(name, model, callbacks=None, batch_size=32, nb_epoch=200):
    """Common cifar10 training code.
    """
    callbacks = callbacks or []
    tb = TensorBoard(log_dir='./logs/{}'.format(name))
    model_checkpoint = ModelCheckpoint('./weights/{}.hdf5'.format(name),
                                       monitor='val_loss', save_best_only=True)
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1,
                                  patience=5, min_lr=1e-7)
    callbacks.extend([reduce_lr, tb, model_checkpoint])

    print("Training {}".format(name))

    # This will do preprocessing and realtime data augmentation:
    datagen = ImageDataGenerator(
        featurewise_center=False,  # set input mean to 0 over the dataset
        samplewise_center=False,  # set each sample mean to 0
        featurewise_std_normalization=False,  # divide inputs by std of the dataset
        samplewise_std_normalization=False,  # divide each input by its std
        zca_whitening=False,  # apply ZCA whitening
        rotation_range=0,  # randomly rotate images in the range (degrees, 0 to 180)
        width_shift_range=0.1,  # randomly shift images horizontally (fraction of total width)
        height_shift_range=0.1,  # randomly shift images vertically (fraction of total height)
        horizontal_flip=True,  # randomly flip images
        vertical_flip=False)  # randomly flip images

    # Compute quantities required for feature-wise normalization
    # (std, mean, and principal components if ZCA whitening is applied).
    datagen.fit(X_train)

    # Fit the model on the batches generated by datagen.flow().
    model.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_size),
                        samples_per_epoch=X_train.shape[0],
                        nb_epoch=nb_epoch,
                        verbose=2,
                        max_q_size=1000,
                        callbacks=callbacks,
                        validation_data=(X_test, Y_test))
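Several of these examples use Keras 1 era argument names (nb_epoch, samples_per_epoch, nb_val_samples, max_q_size). In Keras 2 these became epochs, steps_per_epoch (counted in batches rather than samples), validation_steps, and max_queue_size. The fit_generator call above, rewritten in Keras 2 spelling (names such as model, datagen, X_train come from the example above):

# Keras 2 spelling of the fit_generator call above: steps are counted
# in batches, so the sample count is divided by the batch size.
model.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_size),
                    steps_per_epoch=X_train.shape[0] // batch_size,
                    epochs=nb_epoch,
                    verbose=2,
                    max_queue_size=1000,
                    callbacks=callbacks,
                    validation_data=(X_test, Y_test))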
def fit_model_resnet50(X_train, X_test, Y_train, Y_test, save_output_root,
                       model_type, name_time, batch_size, epochs, input_shape):
    print('\nBatch size: {} \nCompiling model...'.format(batch_size))

    generator = _image_generator(X_train, Y_train)

    # checkpoint
    filepath = 'weights/weights-improvement142-{epoch:02d}-{val_acc:.2f}.hdf5'
    checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=0,
                                 save_best_only=True, mode='max')
    # Change learning rate when learning plateaus
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1,
                                  patience=2, min_lr=0.00001)
    # Stop model once it stops improving to prevent overfitting
    early_stop = EarlyStopping(monitor='val_acc', min_delta=0,
                               patience=3, verbose=0, mode='auto')
    # put all callback functions in a list
    callbacks_list = [checkpoint, reduce_lr]

    history = final_model.fit_generator(
        generator.flow(X_train, Y_train, batch_size=batch_size),
        steps_per_epoch=(X_train.shape[0] // batch_size),
        epochs=epochs,
        validation_data=(X_test, Y_test),
        callbacks=callbacks_list,
        shuffle=True
    )

    score = final_model.evaluate(X_test, Y_test, verbose=0, batch_size=batch_size)
    ypred = final_model.predict(X_test)
    # ypred_classes = final_model.predict_classes(X_test)
    print('Test score:', score[0])
    print('Test accuracy:', score[1])

    return ypred, final_model, history

# def visualize_layers(model):
#     layer_dict = dict([(layer.name, layer) for layer in model.layers])
def get_callback():
    def return_callback():
        from keras.callbacks import ReduceLROnPlateau
        reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,
                                      patience=5, min_lr=0.001)
        return reduce_lr
    return return_callback
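This factory pattern defers both the import and the construction of the callback, which can be useful when a fresh callback instance is needed for each training run (for example, one per worker in a hyperparameter search). A hypothetical usage of the factory above:

# Hypothetical usage: build a fresh ReduceLROnPlateau per run so that
# no plateau-tracking state leaks between runs.
make_reduce_lr = get_callback()
for run in range(3):
    reduce_lr = make_reduce_lr()  # new callback instance each run
    # model.fit(x, y, validation_split=0.2, callbacks=[reduce_lr])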
def main():
    args = get_arguments()
    np.random.seed(args.random_seed)

    from molecules.model import MoleculeVAE
    from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau

    data = pd.read_hdf(args.data, 'table')
    structures = data['structure']

    # import gzip
    # filepath = args.data
    # structures = [line.split()[0].strip() for line in gzip.open(filepath) if line]

    # can also use CanonicalSmilesDataGenerator
    datobj = SmilesDataGenerator(structures, MAX_LEN,
                                 test_split=args.test_split,
                                 random_seed=args.random_seed)
    test_divisor = int((1 - datobj.test_split) / (datobj.test_split))
    train_gen = datobj.train_generator(args.batch_size)
    test_gen = datobj.test_generator(args.batch_size)

    # reformulate generators to not use weights
    train_gen = ((tens, tens) for (tens, _, weights) in train_gen)
    test_gen = ((tens, tens) for (tens, _, weights) in test_gen)

    model = MoleculeVAE()
    if os.path.isfile(args.model):
        model.load(datobj.chars, args.model, latent_rep_size=args.latent_dim)
    else:
        model.create(datobj.chars, latent_rep_size=args.latent_dim)

    checkpointer = ModelCheckpoint(filepath=args.model,
                                   verbose=1,
                                   save_best_only=True)

    reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                  factor=0.2,
                                  patience=3,
                                  min_lr=0.0001)

    model.autoencoder.fit_generator(
        train_gen,
        args.epoch_size,
        nb_epoch=args.epochs,
        callbacks=[checkpointer, reduce_lr],
        validation_data=test_gen,
        nb_val_samples=args.epoch_size / test_divisor,
        pickle_safe=True
    )
def seq(x_train, y_train, x_val, y_val, x_test, y_test):
    # Defining the structure of the neural network: a fully connected network
    # (a convolutional variant is left commented out below)
    model = Sequential()
    # model.add(Conv2D(128, (3, 5), activation='relu', input_shape=(1, 39, 40)))
    # model.add(Conv2D(64, (3, 5)))
    # model.add(MaxPooling2D((2, 2)))
    # model.add(Flatten())
    model.add(Dense(512, activation='relu', input_shape=(780,)))
    model.add(Dense(512, activation='relu'))  # Fully connected layer 1
    # model.add(Dropout(0.5))
    model.add(Dense(2, activation='softmax'))  # Output layer
    model.summary()
    # f = open('/home/siddharthm/scd/scores/' + common_save + '-complete.txt', 'rb+')
    # print f >> model.summary()
    data_saver(str(model.to_json()))
    # f.close()

    sgd = SGD(lr=0.1)
    early_stopping = EarlyStopping(monitor='val_loss', patience=4)
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', patience=4, factor=0.5, min_lr=0.0000001)

    # Compilation region: define optimizer, cost function, and the metric
    model.compile(optimizer=sgd, loss='binary_crossentropy', metrics=['accuracy'])

    # Fitting region: fit the model with training data
    checkpointer = ModelCheckpoint(filepath=direc + common_save + '.json', monitor='val_acc',
                                   save_best_only=True, save_weights_only=True)
    model.fit(x_train, y_train, epochs=EPOCH, batch_size=batch,
              validation_data=(x_val, y_val),
              callbacks=[checkpointer, early_stopping, reduce_lr])
    model.save_weights(direc + common_save + '-weights' + '.json')  # Saving the weights from the model
    model.save(direc + common_save + '-model' + '.json')  # Saving the model as is in its state

    ### SAVING THE VALIDATION DATA ###
    scores = model.predict(x_val, batch_size=batch)
    sio.savemat(direc + name_val + '.mat', {'scores': scores, 'ytest': y_val})  # These are the validation scores.
    classes = model.predict_classes(x_train, batch_size=batch)
    ### ------------- ###

    ### SAVING THE TESTING DATA ###
    # scores_test = model.predict(x_test, batch_size=batch)
    # sio.savemat(direc + name_test + '.mat', {'scores': scores_test, 'ytest': y_test})
    ### ------------- ###
    # print model.evaluate(x_test, y_test, batch_size=batch)
    # predictions = model.predict(x_val, batch_size=batch)
    # print "Shape of predictions: ", predictions.shape
    # print "Shape of y_test: ", y_test.shape
    return classes

# Non-function section
# y_test, predictions, classes = seq(x_train, y_train, x_val, y_val, x_test, y_test)  # Calling the seq model, with 2 hidden layers
def seq(x_train, y_train, x_val, y_val, x_test, y_test):
    # Defining the structure of the neural network: a fully connected network
    # (a convolutional variant is left commented out below)
    model = Sequential()
    # model.add(Conv2D(128, (3, 5), activation='relu', input_shape=(1, 39, 40)))
    # model.add(Conv2D(64, (3, 5)))
    # model.add(MaxPooling2D((2, 2)))
    # model.add(Flatten())
    model.add(Dense(256, activation='relu', input_shape=(3904,)))
    model.add(Dense(512, activation='relu'))  # Fully connected layer 1
    model.add(Dropout(0.25))
    model.add(Dense(512, activation='relu'))  # Fully connected layer 2
    model.add(Dropout(0.25))
    model.add(Dense(2, activation='softmax'))  # Output layer
    model.summary()
    # f = open('/home/siddharthm/scd/scores/' + common_save + '-complete.txt', 'rb+')
    # print f >> model.summary()
    data_saver("##### -------- #####")
    data_saver(str(model.to_json()))
    # f.close()

    sgd = SGD(lr=1)
    early_stopping = EarlyStopping(monitor='val_loss', patience=6)
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', patience=4, factor=0.5, min_lr=0.0000001)

    # Compilation region: define optimizer, cost function, and the metric
    model.compile(optimizer=sgd, loss='binary_crossentropy', metrics=['accuracy'])

    # Fitting region: fit the model with training data
    checkpointer = ModelCheckpoint(filepath=direc + common_save + '.json', monitor='val_acc',
                                   save_best_only=True, save_weights_only=True)
    model.fit(x_train, y_train, epochs=EPOCH, batch_size=batch,
              validation_data=(x_val, y_val),
              callbacks=[checkpointer, early_stopping, reduce_lr])
    model.save_weights(direc + common_save + '-weights' + '.json')  # Saving the weights from the model
    model.save(direc + common_save + '-model' + '.json')  # Saving the model as is in its state

    ### SAVING THE VALIDATION DATA ###
    scores = model.predict(x_val, batch_size=batch)
    sio.savemat(direc + name_val + '.mat', {'scores': scores, 'ytest': y_val})  # These are the validation scores.
    classes = model.predict_classes(x_val, batch_size=batch)
    ### ------------- ###

    ### SAVING THE TESTING DATA ###
    # scores_test = model.predict(x_test, batch_size=batch)
    # sio.savemat(direc + name_test + '.mat', {'scores': scores_test, 'ytest': y_test})
    ### ------------- ###
    # print model.evaluate(x_test, y_test, batch_size=batch)
    # predictions = model.predict(x_val, batch_size=batch)
    # print "Shape of predictions: ", predictions.shape
    print("Training 0 class: ", len(np.where(y_train[:, 0] == 1)[0]))
    print("Training 1 class: ", len(np.where(y_train[:, 1] == 1)[0]))
    return classes

# Non-function section
# y_test, predictions, classes = seq(x_train, y_train, x_val, y_val, x_test, y_test)  # Calling the seq model, with 2 hidden layers
def seq(x_train, y_train, x_val, y_val, x_test, y_test):
    # Defining the structure of the neural network: three convolutional layers
    # followed by a fully connected layer
    model = Sequential()
    model.add(Conv2D(128, (2, 5), activation='relu', input_shape=(1, 39, 20)))
    model.add(Conv2D(128, (2, 3)))
    model.add(Conv2D(64, (2, 3)))
    model.add(MaxPooling2D((2, 2)))
    model.add(Flatten())
    model.add(Dense(1024, activation='relu'))  # Fully connected layer 1
    model.add(Dropout(0.5))
    model.add(Dense(2, activation='softmax'))  # Output layer
    model.summary()
    # f = open('/home/siddharthm/scd/scores/' + common_save + '-complete.txt', 'rb+')
    # print f >> model.summary()
    data_saver(str(model.to_json()))
    # f.close()

    sgd = SGD(lr=0.1)  # defined but unused: the model is compiled with 'adam' below
    early_stopping = EarlyStopping(monitor='val_loss', patience=4)
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', patience=4, min_lr=0.0000001)

    # Compilation region: define optimizer, cost function, and the metric
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

    # Fitting region: fit the model with training data
    checkpointer = ModelCheckpoint(filepath=direc + common_save + '.json', monitor='val_acc',
                                   save_best_only=True, save_weights_only=True)
    model.fit(x_train, y_train, epochs=EPOCH, batch_size=batch,
              validation_data=(x_val, y_val),
              callbacks=[checkpointer, early_stopping, reduce_lr])
    model.save_weights(direc + common_save + '-weights' + '.json')  # Saving the weights from the model
    model.save(direc + common_save + '-model' + '.json')  # Saving the model as is in its state

    ### SAVING THE VALIDATION DATA ###
    scores = model.predict(x_val, batch_size=batch)
    sio.savemat(direc + name_val + '.mat', {'scores': scores, 'ytest': y_val})  # These are the validation scores.
    classes = model.predict_classes(x_train, batch_size=batch)
    ### ------------- ###

    ### SAVING THE TESTING DATA ###
    # scores_test = model.predict(x_test, batch_size=batch)
    # sio.savemat(direc + name_test + '.mat', {'scores': scores_test, 'ytest': y_test})
    ### ------------- ###
    # print model.evaluate(x_test, y_test, batch_size=batch)
    # predictions = model.predict(x_val, batch_size=batch)
    # print "Shape of predictions: ", predictions.shape
    # print "Shape of y_test: ", y_test.shape
    return classes

# Non-function section
# y_test, predictions, classes = seq(x_train, y_train, x_val, y_val, x_test, y_test)  # Calling the seq model, with 2 hidden layers
def seq(x_train, y_train, x_val, y_val, x_test, y_test):
    # Defining the structure of the neural network: a fully connected network
    # (a convolutional variant is left commented out below)
    model = Sequential()
    # model.add(Conv2D(128, (3, 5), activation='relu', input_shape=(1, 39, 40)))
    # model.add(Conv2D(64, (3, 5)))
    # model.add(MaxPooling2D((2, 2)))
    # model.add(Flatten())
    model.add(Dense(256, activation='relu', input_shape=(5184,)))
    model.add(Dense(512, activation='relu'))  # Fully connected layer 1
    # model.add(Dropout(0.5))
    model.add(Dense(512, activation='relu'))  # Fully connected layer 2
    model.add(Dropout(0.5))
    model.add(Dense(2, activation='softmax'))  # Output layer
    model.summary()
    # f = open('/home/siddharthm/scd/scores/' + common_save + '-complete.txt', 'rb+')
    # print f >> model.summary()
    data_saver("##### -------- #####")
    data_saver(str(model.to_json()))
    # f.close()

    sgd = SGD(lr=1)
    early_stopping = EarlyStopping(monitor='val_loss', patience=6)
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', patience=4, factor=0.5, min_lr=0.0000001)

    # Compilation region: define optimizer, cost function, and the metric
    model.compile(optimizer=sgd, loss='binary_crossentropy', metrics=['accuracy'])

    # Fitting region: fit the model with training data
    checkpointer = ModelCheckpoint(filepath=direc + common_save + '.json', monitor='val_acc',
                                   save_best_only=True, save_weights_only=True)
    model.fit(x_train, y_train, epochs=EPOCH, batch_size=batch,
              validation_data=(x_val, y_val),
              callbacks=[checkpointer, early_stopping, reduce_lr])
    model.save_weights(direc + common_save + '-weights' + '.json')  # Saving the weights from the model
    model.save(direc + common_save + '-model' + '.json')  # Saving the model as is in its state

    ### SAVING THE VALIDATION DATA ###
    scores = model.predict(x_val, batch_size=batch)
    sio.savemat(direc + name_val + '.mat', {'scores': scores, 'ytest': y_val})  # These are the validation scores.
    classes = model.predict_classes(x_val, batch_size=batch)
    ### ------------- ###

    ### SAVING THE TESTING DATA ###
    # scores_test = model.predict(x_test, batch_size=batch)
    # sio.savemat(direc + name_test + '.mat', {'scores': scores_test, 'ytest': y_test})
    ### ------------- ###
    # print model.evaluate(x_test, y_test, batch_size=batch)
    # predictions = model.predict(x_val, batch_size=batch)
    # print "Shape of predictions: ", predictions.shape
    data_saver(str(len(np.where(y_train[:, 0] == 1)[0])))
    data_saver(str(len(np.where(y_train[:, 1] == 1)[0])))
    print("Training 0 class: ", len(np.where(y_train[:, 0] == 1)[0]))
    print("Training 1 class: ", len(np.where(y_train[:, 1] == 1)[0]))
    return classes

# Non-function section
# y_test, predictions, classes = seq(x_train, y_train, x_val, y_val, x_test, y_test)  # Calling the seq model, with 2 hidden layers
def seq(x_train, y_train, x_val, y_val, x_test, y_test):
    # Defining the structure of the neural network: three convolutional layers
    # followed by two fully connected layers
    model = Sequential()
    model.add(Conv2D(64, (7, 5), activation='relu', input_shape=(1, 40, 20)))
    model.add(Conv2D(128, (5, 3), activation='relu', padding='same'))
    model.add(Conv2D(256, (3, 3), activation='relu'))
    model.add(MaxPooling2D((5, 2)))
    model.add(Flatten())
    model.add(Dense(256, activation='relu'))  # Fully connected layer 1
    model.add(Dropout(0.5))
    model.add(Dense(256, activation='relu'))  # Fully connected layer 2
    model.add(Dropout(0.5))
    model.add(Dense(2, activation='softmax'))  # Output layer
    model.summary()
    # f = open('/home/siddharthm/scd/scores/' + common_save + '-complete.txt', 'rb+')
    # print f >> model.summary()
    data_saver("##### ------ #####")
    data_saver(str(model.to_json()))
    # f.close()

    # Compilation region: define optimizer, cost function, and the metric
    sgd = SGD(lr=1)
    early_stopping = EarlyStopping(monitor='val_loss', patience=4)
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', patience=4, factor=0.5)
    model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])

    # Fitting region: fit the model with training data
    checkpointer = ModelCheckpoint(filepath=direc + common_save + '.json', monitor='val_acc',
                                   save_best_only=True, save_weights_only=True)
    model.fit(x_train, y_train, epochs=EPOCH, batch_size=batch,
              validation_data=(x_val, y_val),
              callbacks=[checkpointer, early_stopping, reduce_lr])
    model.save_weights(direc + common_save + '-weights' + '.json')  # Saving the weights from the model
    model.save(direc + common_save + '-model' + '.json')  # Saving the model as is in its state

    ### SAVING THE VALIDATION DATA ###
    scores = model.predict(x_val, batch_size=batch)
    sio.savemat(direc + name_val + '.mat', {'scores': scores, 'ytest': y_val})  # These are the validation scores.
    classes = model.predict_classes(x_val, batch_size=batch)
    ### ------------- ###

    ### SAVING THE TESTING DATA ###
    # scores_test = model.predict(x_test, batch_size=batch)
    # sio.savemat(direc + name_test + '.mat', {'scores': scores_test, 'ytest': y_test})
    ### ------------- ###
    # print model.evaluate(x_test, y_test, batch_size=batch)
    # predictions = model.predict(x_val, batch_size=batch)
    # print "Shape of predictions: ", predictions.shape
    # print "Shape of y_test: ", y_test.shape
    return classes

# Non-function section
# y_test, predictions, classes = seq(x_train, y_train, x_val, y_val, x_test, y_test)  # Calling the seq model, with 2 hidden layers