We extracted the following 50 code examples from open-source Python projects to illustrate how to use keras.callbacks.ModelCheckpoint().
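Before the project-specific examples, here is a minimal, self-contained sketch of the typical pattern: construct a ModelCheckpoint with a filepath (optionally templated with the epoch number and a monitored metric) and pass it to model.fit() via the callbacks argument. The model, data, and filenames below are invented for illustration only.

    import numpy as np
    from keras.models import Sequential
    from keras.layers import Dense
    from keras.callbacks import ModelCheckpoint

    # Toy model and random data, purely illustrative.
    model = Sequential([Dense(16, activation='relu', input_shape=(8,)),
                        Dense(1, activation='sigmoid')])
    model.compile(optimizer='adam', loss='binary_crossentropy')

    x = np.random.rand(100, 8)
    y = np.random.randint(0, 2, size=(100, 1))

    # Save only the best weights (by validation loss) to a templated filename.
    checkpointer = ModelCheckpoint('weights.{epoch:02d}-{val_loss:.2f}.hdf5',
                                   monitor='val_loss', save_best_only=True, verbose=1)

    model.fit(x, y, epochs=5, batch_size=16, validation_split=0.2,
              callbacks=[checkpointer])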
def callbacks(self):
    """
    :return:
    """
    # TODO: Add ReduceLROnPlateau callback
    cbs = []

    tb = TensorBoard(log_dir=self.log_dir,
                     write_graph=True,
                     write_images=True)
    cbs.append(tb)

    best_model_filename = self.model_name + '_best.h5'
    best_model = os.path.join(self.checkpoint_dir, best_model_filename)
    save_best = ModelCheckpoint(best_model, save_best_only=True)
    cbs.append(save_best)

    checkpoints = ModelCheckpoint(filepath=self.checkpoint_file, verbose=1)
    cbs.append(checkpoints)

    reduce_lr = ReduceLROnPlateau(patience=1, verbose=1)
    cbs.append(reduce_lr)

    return cbs
def train(model, batch_size, nb_epoch, save_dir, train_data, val_data, char_set):
    X_train, y_train = train_data[0], train_data[1]
    sample_weight = get_sample_weight(y_train, char_set)
    print 'X_train shape:', X_train.shape
    print X_train.shape[0], 'train samples'

    if os.path.exists(save_dir) == False:
        os.mkdir(save_dir)

    start_time = time.time()
    save_path = save_dir + 'weights.{epoch:02d}-{val_loss:.2f}.hdf5'
    check_pointer = ModelCheckpoint(save_path, save_best_only=True)
    history = model.fit(X_train, y_train,
                        batch_size=batch_size,
                        nb_epoch=nb_epoch,
                        validation_data=val_data,
                        validation_split=0.1,
                        callbacks=[check_pointer],
                        sample_weight=sample_weight)

    plot_loss_figure(history, save_dir + str(datetime.now()).split('.')[0].split()[1] + '.jpg')
    print 'Training time(h):', (time.time() - start_time) / 3600
def lengthy_test(model, testrange=[5, 10, 20, 40, 80], epochs=100, verboose=True):
    ts = datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
    log_path = LOG_PATH_BASE + ts + "_-_" + model.name
    tensorboard = TensorBoard(log_dir=log_path,
                              write_graph=False,  # This eats a lot of space. Enable with caution!
                              # histogram_freq=1,
                              write_images=True,
                              batch_size=model.batch_size,
                              write_grads=True)
    model_saver = ModelCheckpoint(log_path + "/model.ckpt.{epoch:04d}.hdf5",
                                  monitor='loss', period=1)
    callbacks = [tensorboard, TerminateOnNaN(), model_saver]

    for i in testrange:
        acc = test_model(model, sequence_length=i, verboose=verboose)
        print("the accuracy for length {0} was: {1}%".format(i, acc))

    train_model(model, epochs=epochs, callbacks=callbacks, verboose=verboose)

    for i in testrange:
        acc = test_model(model, sequence_length=i, verboose=verboose)
        print("the accuracy for length {0} was: {1}%".format(i, acc))
    return
def train_model(model, X, X_test, Y, Y_test):
    batch_size = 100
    epochs = 2

    checkpoints = []
    if not os.path.exists('Data/Checkpoints/'):
        os.makedirs('Data/Checkpoints/')
    checkpoints.append(ModelCheckpoint('Data/Checkpoints/best_weights.h5', monitor='val_loss',
                                       verbose=0, save_best_only=True, save_weights_only=True,
                                       mode='auto', period=1))
    checkpoints.append(TensorBoard(log_dir='Data/Checkpoints/./logs', histogram_freq=0,
                                   write_graph=True, write_images=False, embeddings_freq=0,
                                   embeddings_layer_names=None, embeddings_metadata=None))

    # Creates live data:
    # For better yield. The duration of the training is extended.
    # If you don't want, use this:
    # model.fit(X, Y, batch_size=batch_size, epochs=epochs, validation_data=(X_test, Y_test), shuffle=True, callbacks=checkpoints)
    generated_data = ImageDataGenerator(featurewise_center=False, samplewise_center=False,
                                        featurewise_std_normalization=False,
                                        samplewise_std_normalization=False, zca_whitening=False,
                                        rotation_range=0, width_shift_range=0.1,
                                        height_shift_range=0.1, horizontal_flip=True,
                                        vertical_flip=False)
    generated_data.fit(X)

    model.fit_generator(generated_data.flow(X, Y, batch_size=batch_size),
                        steps_per_epoch=X.shape[0] / 6, epochs=epochs,
                        validation_data=(X_test, Y_test), callbacks=checkpoints)

    return model
def model(data, hidden_layers, hidden_neurons, output_file, validation_split=0.9):
    train_n = int(validation_split * len(data))
    batch_size = 50
    train_data = data[:train_n, :]
    val_data = data[train_n:, :]

    input_sh = Input(shape=(data.shape[1],))
    encoded = noise.GaussianNoise(0.2)(input_sh)
    for i in range(hidden_layers):
        encoded = Dense(hidden_neurons[i], activation='relu')(encoded)
        encoded = noise.GaussianNoise(0.2)(encoded)

    decoded = Dense(hidden_neurons[-2], activation='relu')(encoded)
    for j in range(hidden_layers - 3, -1, -1):
        decoded = Dense(hidden_neurons[j], activation='relu')(decoded)
    decoded = Dense(data.shape[1], activation='sigmoid')(decoded)

    autoencoder = Model(input=input_sh, output=decoded)
    autoencoder.compile(optimizer='adadelta', loss='mse')

    checkpointer = ModelCheckpoint(filepath='data/bestmodel' + output_file + ".hdf5",
                                   verbose=1, save_best_only=True)
    earlystopper = EarlyStopping(monitor='val_loss', patience=15, verbose=1)

    train_generator = DataGenerator(batch_size)
    train_generator.fit(train_data, train_data)
    val_generator = DataGenerator(batch_size)
    val_generator.fit(val_data, val_data)

    autoencoder.fit_generator(train_generator,
                              samples_per_epoch=len(train_data),
                              nb_epoch=100,
                              validation_data=val_generator,
                              nb_val_samples=len(val_data),
                              max_q_size=batch_size,
                              callbacks=[checkpointer, earlystopper])

    enco = Model(input=input_sh, output=encoded)
    enco.compile(optimizer='adadelta', loss='mse')
    reprsn = enco.predict(data)
    return reprsn
def get_callbacks(self, model_prefix='Model'):
    """
    Creates a list of callbacks that can be used during training to create a
    snapshot ensemble of the model.

    Args:
        model_prefix: prefix for the filename of the weights.

    Returns:
        list of 3 callbacks [ModelCheckpoint, LearningRateScheduler,
        SnapshotModelCheckpoint] which can be provided to the 'fit' function
    """
    if not os.path.exists('weights/'):
        os.makedirs('weights/')

    callback_list = [ModelCheckpoint('weights/%s-Best.h5' % model_prefix, monitor='val_acc',
                                     save_best_only=True, save_weights_only=True),
                     LearningRateScheduler(schedule=self._cosine_anneal_schedule),
                     SnapshotModelCheckpoint(self.T, self.M, fn_prefix='weights/%s' % model_prefix)]

    return callback_list
def train_multilabel_bts(lang_db, imdb, pretrained, max_iters=1000,
                         loss_func='squared_hinge', box_method='random'):
    # Create callback_list.
    dir_path = osp.join('output', 'bts_ckpt', imdb.name)
    tensor_path = osp.join(dir_path, 'log_dir')
    if not osp.exists(dir_path):
        os.makedirs(dir_path)
    if not osp.exists(tensor_path):
        os.makedirs(tensor_path)

    ckpt_save = osp.join(dir_path, lang_db.name + '_multi_label_fixed_' + 'weights-{epoch:02d}.hdf5')
    checkpoint = ModelCheckpoint(ckpt_save, monitor='loss', verbose=1, save_best_only=True)
    early_stop = EarlyStopping(monitor='loss', min_delta=0, patience=3, verbose=0, mode='auto')
    tensorboard = TensorBoard(log_dir=dir_path, histogram_freq=2000, write_graph=True, write_images=False)
    callback_list = [checkpoint, early_stop, tensorboard]

    pretrained.fit_generator(load_multilabel_data(imdb, lang_db, pretrained, box_method),
                             steps_per_epoch=5000,
                             epochs=max_iters,
                             verbose=1,
                             callbacks=callback_list,
                             workers=1)

    pretrained.save(osp.join(dir_path, 'model_fixed' + imdb.name + '_' + lang_db.name +
                             '_ML_' + box_method + '_' + loss_func + '.hdf5'))
def fit(self, train_X, val_X, nb_epoch=50, batch_size=100, feature_weights=None):
    print 'Training autoencoder'
    optimizer = Adadelta(lr=1.5)
    # optimizer = Adam()
    # optimizer = Adagrad()
    if feature_weights is None:
        self.autoencoder.compile(optimizer=optimizer, loss='binary_crossentropy')  # kld, binary_crossentropy, mse
    else:
        print 'Using weighted loss'
        self.autoencoder.compile(optimizer=optimizer,
                                 loss=weighted_binary_crossentropy(feature_weights))  # kld, binary_crossentropy, mse

    self.autoencoder.fit(train_X[0], train_X[1],
                         nb_epoch=nb_epoch,
                         batch_size=batch_size,
                         shuffle=True,
                         validation_data=(val_X[0], val_X[1]),
                         callbacks=[
                             ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=3, min_lr=0.01),
                             EarlyStopping(monitor='val_loss', min_delta=1e-5, patience=5, verbose=1, mode='auto'),
                             # ModelCheckpoint(self.model_save_path, monitor='val_loss', save_best_only=True, verbose=0),
                         ])

    return self
def train_model(self):
    if self.verbose:
        print 'training model ... '
        start_time = time.time()

    self.checkpointer = ModelCheckpoint(filepath=self.weights_filename, verbose=1, save_best_only=True)
    self.history = History()

    self.model.fit_generator(self.datagen.flow(self.xs_train, self.ys_train, batch_size=32),
                             samples_per_epoch=len(self.xs_train),
                             nb_epoch=self.num_training_epochs,
                             validation_data=(self.xs_val, self.ys_val),
                             callbacks=[self.checkpointer, self.history])

    if self.verbose:
        end_time = time.time()
        self.print_time(start_time, end_time, 'training model')
def finetune_model(self):
    if self.verbose:
        print 'training model ... '
        start_time = time.time()

    self.checkpointer = ModelCheckpoint(filepath=self.weights_filename, verbose=1, save_best_only=True)
    self.history = History()

    self.model.fit_generator(self.datagen.flow(self.xs_train, self.ys_train, batch_size=32),
                             samples_per_epoch=len(self.xs_train),
                             nb_epoch=self.num_training_epochs,
                             validation_data=(self.xs_val, self.ys_val),
                             callbacks=[self.checkpointer, self.history])

    if self.verbose:
        end_time = time.time()
        self.print_time(start_time, end_time, 'training model')
def runner(model, epochs):
    initial_LR = 0.001
    if not use_multiscale and not use_multicrop:
        training_gen, val_gen = DataGen()
    else:
        training_gen, val_gen = ms_traingen(), ms_valgen()

    model.compile(optimizer=SGD(initial_LR, momentum=0.9, nesterov=True),
                  loss='binary_crossentropy')

    val_checkpoint = ModelCheckpoint('bestval.h5', 'val_loss', 1, True)
    cur_checkpoint = ModelCheckpoint('current.h5')
    # def lrForEpoch(i): return initial_LR
    lrScheduler = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=2,
                                    cooldown=1, verbose=1)
    print 'Model compiled.'

    try:
        model.fit_generator(training_gen, samples_per_epoch, epochs,
                            verbose=1, validation_data=val_gen, nb_val_samples=nb_val_samples,
                            callbacks=[val_checkpoint, cur_checkpoint, lrScheduler])
    except Exception as e:
        print e
    finally:
        fname = dumper(model, 'cnn')
        print 'Model saved to disk at {}'.format(fname)
        return model
def _get_callbacks(self):
    """Return callbacks to pass into the Model.fit method

    Note: This simply returns statically instantiated callbacks. In the
     future it could be altered to allow for callbacks that are specified
     and configured via a training config.
    """
    fpath_history = os.path.join(self.output_dir, 'history.csv')
    fpath_weights = os.path.join(self.output_dir, 'weights.h5')

    csv_logger = CSVLogger(filename=fpath_history)
    model_checkpoint = ModelCheckpoint(
        filepath=fpath_weights, verbose=True
    )

    callbacks = [csv_logger, model_checkpoint]
    return callbacks
def _train_model():
    data_info = load_organized_data_info(IMGS_DIM_3D[1])
    dir_tr = data_info['dir_tr']
    dir_val = data_info['dir_val']

    gen_tr, gen_val = train_val_dirs_generators(BATCH_SIZE, dir_tr, dir_val)

    model = _cnn(IMGS_DIM_3D)
    model.fit_generator(
        generator=gen_tr,
        nb_epoch=MAX_EPOCHS,
        samples_per_epoch=data_info['num_tr'],
        validation_data=gen_val,
        nb_val_samples=data_info['num_val'],
        callbacks=[ModelCheckpoint(CNN_MODEL_FILE, save_best_only=True)],
        verbose=2)
def train(self, n_iterations):
    print("Training network...")

    # Recover losses from previous run
    if (self.option == 'continue'):
        with open("{0}_{1}_loss.json".format(self.root, self.depth), 'r') as f:
            losses = json.load(f)
    else:
        losses = []

    self.checkpointer = ModelCheckpoint(filepath="{0}_{1}_weights.hdf5".format(self.root, self.depth),
                                        verbose=1, save_best_only=True)
    self.history = LossHistory(self.root, self.depth, losses,
                               {'name': '{0}_{1}'.format(self.root, self.depth), 'init_t': time.asctime()})
    self.reduce_lr = LearningRateScheduler(self.learning_rate)

    self.metrics = self.model.fit_generator(self.training_generator(), self.batchs_per_epoch_training,
                                            epochs=n_iterations,
                                            callbacks=[self.checkpointer, self.history, self.reduce_lr],
                                            validation_data=self.validation_generator(),
                                            validation_steps=self.batchs_per_epoch_validation)

    self.history.finalize()
def finetuning_callbacks(checkpoint_path, patience, verbose):
    """ Callbacks for model training.

    # Arguments:
        checkpoint_path: Where weight checkpoints should be saved.
        patience: Number of epochs with no improvement after which
            training will be stopped.

    # Returns:
        Array with training callbacks that can be passed straight into
        model.fit() or similar.
    """
    cb_verbose = (verbose >= 2)
    checkpointer = ModelCheckpoint(monitor='val_loss', filepath=checkpoint_path,
                                   save_best_only=True, verbose=cb_verbose)
    earlystop = EarlyStopping(monitor='val_loss', patience=patience, verbose=cb_verbose)
    return [checkpointer, earlystop]
def trainmodel(self, isalldata):
    self.buildmodel_rcnn4_att_titledsp()
    import time
    cur_time = time.strftime('%Y-%m-%d-%H-%M', time.localtime(time.time()))
    checkpointer = ModelCheckpoint(filepath=self.savedir + "/" + cur_time + "_model-{epoch:02d}.hdf5",
                                   period=1)
    zhihuMetrics = ZHIHUMetrics()

    if isalldata:
        self.model.fit([self.titlechar_array, self.titleword_array, self.dspchar_array, self.dspword_array],
                       self.y, epochs=self.num_epochs, batch_size=self.batch_size, verbose=1,
                       callbacks=[checkpointer])
    else:  # with 9:1 validation
        self.model.fit([self.titlechar_array, self.titleword_array, self.dspchar_array, self.dspword_array],
                       self.y, validation_split=0.1, epochs=self.num_epochs, batch_size=self.batch_size,
                       verbose=1, callbacks=[checkpointer, zhihuMetrics])

    self.save_model()
def init_callbacks(self, for_worker=False):
    """Prepares all keras callbacks to be used in training.
        Automatically attaches a History callback to the end of the callback list.
        If for_worker is True, leaves out callbacks that only make sense
        with validation enabled."""
    import keras.callbacks as cbks

    remove_for_worker = [cbks.EarlyStopping, cbks.ModelCheckpoint]
    if for_worker:
        for obj in remove_for_worker:
            self.callbacks_list = [c for c in self.callbacks_list
                                   if not isinstance(c, obj)]

    self.model.history = cbks.History()
    self.callbacks = cbks.CallbackList(self.callbacks_list + [self.model.history])

    # it's possible to callback a different model than self
    # (used by Sequential models)
    if hasattr(self.model, 'callback_model') and self.model.callback_model:
        self.callback_model = self.model.callback_model
    else:
        self.callback_model = self.model
    self.callbacks.set_model(self.callback_model)
    self.callback_model.stop_training = False
def train(self, model, saveto_path=''):
    x_train, y_train = get_data(self.train_data_path, "train", "frame", self.feature_type)
    print('%d training frame level samples.' % len(x_train))
    x_valid, y_valid = get_data(self.valid_data_path, "valid", "frame", self.feature_type)
    print('%d validation frame level samples.' % len(x_valid))

    sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])

    callbacks = list()
    callbacks.append(CSVLogger(LOG_FILE))
    callbacks.append(ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=2, min_lr=0.0001))
    if saveto_path:
        callbacks.append(ModelCheckpoint(filepath=MODEL_WEIGHTS, verbose=1))

    model.fit(x_train, y_train, epochs=5, callbacks=callbacks, validation_data=(x_valid, y_valid))

    # Save the weights on completion.
    if saveto_path:
        model.save_weights(saveto_path)
def train_chrom_labeller(model, train_tuple, valid_tuple, save_weight_hd5):
    checkpointer = ModelCheckpoint(filepath=save_weight_hd5, verbose=1, save_best_only=True)
    earlystopper = EarlyStopping(monitor='val_loss', patience=3, verbose=1)

    history = model.fit(train_tuple[0], train_tuple[1],
                        batch_size=32, nb_epoch=150, shuffle=False,
                        validation_data=(valid_tuple[0], valid_tuple[1]),
                        callbacks=[checkpointer, earlystopper])

    plot_metric_history(history, weight_path_to_title(save_weight_hd5))

    return model


# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~ Training Data ~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def train_model(self, batch_size=32, nb_epoch=50, load_data=False, old_weight_path=''):
    print("start training model...")
    if load_data:
        train_data, train_labels, valid_data, valid_labels = self.load_data()
    else:
        train_data, train_labels, valid_data, valid_labels = self.prepare_train_data()

    model = self.baseModel()
    if old_weight_path != '':
        print("load last epoch model to continue train")
        model.load_weights(old_weight_path)

    model.fit(train_data, train_labels,
              batch_size=batch_size,
              epochs=nb_epoch,
              validation_data=(valid_data, valid_labels),
              callbacks=[ModelCheckpoint("output/weights.{epoch:02d}-{val_loss:.2f}.hdf5",
                                         monitor='val_loss', verbose=1, save_best_only=True,
                                         save_weights_only=False, mode='min', period=2),
                         ProgbarLogger()])
    return model
def __init__(self, filepath, model, base_lr=1e-3, decay_rate=1,
             decay_after_n_epoch=10, patience=20, mode='min', monitor='val_loss'):
    self.base_lr = base_lr
    self.model = model
    self.decay_rate = decay_rate
    self.decay_after_n_epoch = decay_after_n_epoch
    self.callbacks = [ModelCheckpoint(filepath=filepath,
                                      monitor=monitor,
                                      verbose=2,
                                      save_best_only=True,
                                      save_weights_only=True,
                                      mode=mode),
                      EarlyStopping(monitor=monitor, patience=patience, verbose=2, mode=mode),
                      LearningRateScheduler(self._scheduler)]
def _build_callbacks(self):
    """Build callback objects.

    Returns:
        A list containing the following callback objects:
        - TensorBoard
        - ModelCheckpoint
    """
    tensorboard_path = os.path.join(self.checkpoints_path, 'tensorboard')
    tensorboard = TensorBoard(log_dir=tensorboard_path)

    checkpoint_path = os.path.join(self.checkpoints_path, self.checkpoint_file_format)
    checkpointer = ModelCheckpoint(filepath=checkpoint_path, verbose=1,
                                   save_best_only=self.save_best_only)

    return [tensorboard, checkpointer]
def get_callbacks(config_data, appendix=''):
    ret_callbacks = []
    model_stored = False
    callbacks = config_data['callbacks']
    if K._BACKEND == 'tensorflow':
        tensor_board = TensorBoard(log_dir=os.path.join('logging', config_data['tb_log_dir']),
                                   histogram_freq=10)
        ret_callbacks.append(tensor_board)

    for callback in callbacks:
        if callback['name'] == 'early_stopping':
            ret_callbacks.append(EarlyStopping(monitor=callback['monitor'],
                                               patience=callback['patience'],
                                               verbose=callback['verbose'],
                                               mode=callback['mode']))
        elif callback['name'] == 'model_checkpoit':
            model_stored = True
            path = config_data['output_path']
            basename = config_data['output_basename']
            base_path = os.path.join(path, basename)
            opath = os.path.join(base_path, 'best_model{}.h5'.format(appendix))
            save_best = bool(callback['save_best_only'])
            ret_callbacks.append(ModelCheckpoint(filepath=opath,
                                                 verbose=callback['verbose'],
                                                 save_best_only=save_best,
                                                 monitor=callback['monitor'],
                                                 mode=callback['mode']))

    return ret_callbacks, model_stored
def train_model(model, args, X_train, X_valid, y_train, y_valid):
    """
    Train the model
    """
    checkpoint = ModelCheckpoint('model-{epoch:03d}.h5',
                                 monitor='val_loss',
                                 verbose=0,
                                 save_best_only=args.save_best_only,
                                 mode='auto')

    model.compile(loss='mean_squared_error', optimizer=Adam(lr=args.learning_rate))

    model.fit_generator(batch_generator(args.data_dir, X_train, y_train, args.batch_size, True),
                        args.samples_per_epoch,
                        args.nb_epoch,
                        max_q_size=1,
                        validation_data=batch_generator(args.data_dir, X_valid, y_valid, args.batch_size, False),
                        nb_val_samples=len(X_valid),
                        callbacks=[checkpoint],
                        verbose=1)
def train(self, train_data, validation_data, folder):
    context_data, question_data, answer_data, y_train = train_data
    context_data_v, question_data_v, answer_data_v, y_val = validation_data

    print("Model Fitting")
    filepath = folder + "structures/cos-lstm-nn" + VERSION + "-final-{epoch:02d}-{val_acc:.2f}.hdf5"
    checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=0, save_best_only=True, mode='max')

    model_json = self.model.to_json()
    with open(folder + "/structures/cos-lstm-model" + VERSION + ".json", "w") as json_file:
        json_file.write(model_json)

    self.model.summary()

    import numpy as np
    context_data = np.array(list(map(lambda x: x[:MAX_SEQUENCE_LENGTH_C], context_data)))
    context_data_v = np.array(list(map(lambda x: x[:MAX_SEQUENCE_LENGTH_C], context_data_v)))

    self.model.fit({'context': context_data, 'question': question_data, 'answer': answer_data},
                   y_train,
                   validation_data=({'context': context_data_v, 'question': question_data_v,
                                     'answer': answer_data_v}, y_val),
                   epochs=50, batch_size=256, callbacks=[checkpoint], verbose=2)
def fit_cnn1(self, X33_train, Y_train, X33_unif_train, Y_unif_train):
    # Create temp cnn with input shape=(4,33,33,)
    input33 = Input(shape=(4, 33, 33))
    output_cnn = self.one_block_model(input33)
    output_cnn = Reshape((5,))(output_cnn)
    # Cnn compiling
    temp_cnn = Model(inputs=input33, outputs=output_cnn)
    sgd = SGD(lr=self.learning_rate, momentum=self.momentum_rate, decay=self.decay_rate, nesterov=False)
    temp_cnn.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
    # Stop the training if the monitor function doesn't change after patience epochs
    earlystopping = EarlyStopping(monitor='val_loss', patience=2, verbose=1, mode='auto')
    # Save model after each epoch to check/bm_epoch#-val_loss
    checkpointer = ModelCheckpoint(filepath="/home/ixb3/Scrivania/check/bm_{epoch:02d}-{val_loss:.2f}.hdf5",
                                   verbose=1)
    # First-phase training with uniformly distributed training set
    temp_cnn.fit(x=X33_train, y=Y_train, batch_size=self.batch_size, epochs=self.nb_epoch,
                 callbacks=[earlystopping, checkpointer], validation_split=0.3, verbose=1)
    # Fix all the layers of the temporary cnn except the output layer for the second phase
    temp_cnn = self.freeze_model(temp_cnn, freeze_output=False)
    # Second-phase training of the output layer with a training set having the real class distribution
    temp_cnn.fit(x=X33_unif_train, y=Y_unif_train, batch_size=self.batch_size, epochs=self.nb_epoch,
                 callbacks=[earlystopping, checkpointer], validation_split=0.3, verbose=1)
    # Set the weights of the first cnn to the trained weights of the temporary cnn
    self.cnn1.set_weights(temp_cnn.get_weights())
def train_model(model):
    opt = 'rmsprop'

    model_checkpoint = ModelCheckpoint(
        filepath=BEST_MODEL_PATH,
        monitor='val_acc',
        verbose=0,
        save_best_only=True,
        mode='auto'
    )

    overfitting_stopper = EarlyStopping(
        monitor='val_acc',
        min_delta=0,
        patience=5,
        verbose=1,
        mode='auto'
    )

    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])

    model_history = model.fit(X_train, y_train,
                              batch_size=64,
                              nb_epoch=50,
                              # shuffle=True,
                              validation_split=0.2,
                              # verbose=2,
                              callbacks=[overfitting_stopper, model_checkpoint])
def EES_train():
    EES = model_EES16()
    EES.compile(optimizer=adam(lr=0.0003), loss='mse')
    print EES.summary()

    data, label = pd.read_training_data("./train.h5")
    val_data, val_label = pd.read_training_data("./val.h5")

    checkpoint = ModelCheckpoint("EES_check.h5", monitor='val_loss', verbose=1,
                                 save_best_only=True, save_weights_only=False, mode='min')
    callbacks_list = [checkpoint]

    history_callback = EES.fit(data, label, batch_size=64,
                               validation_data=(val_data, val_label),
                               callbacks=callbacks_list, shuffle=True, nb_epoch=200, verbose=1)
    pandas.DataFrame(history_callback.history).to_csv("history.csv")
    EES.save_weights("EES_final.h5")
def build_keras_fit_callbacks(model_path):
    return [
        callbacks.EarlyStopping(
            monitor='val_loss',
            patience=20
            # verbose=1
        ),
        callbacks.ModelCheckpoint(
            model_path,
            monitor='val_loss',
            save_best_only=True,
            save_weights_only=True,
            verbose=0
        ),
        callbacks.ReduceLROnPlateau(
            monitor='val_loss',
            min_lr=1e-7,
            factor=0.2,
            verbose=0
        )
    ]
def build_keras_fit_callbacks(model_path):
    from keras import callbacks
    return [
        callbacks.EarlyStopping(
            monitor='val_loss',
            patience=20
            # verbose=1
        ),
        callbacks.ModelCheckpoint(
            model_path,
            monitor='val_loss',
            save_best_only=True,
            save_weights_only=True,
            verbose=0
        ),
        callbacks.ReduceLROnPlateau(
            monitor='val_loss',
            min_lr=1e-7,
            factor=0.2,
            verbose=0
        )
    ]
def keras_fit_callbacks(model_path):
    from keras import callbacks
    return [
        callbacks.EarlyStopping(
            monitor='val_loss',
            patience=20
            # verbose=1
        ),
        callbacks.ModelCheckpoint(
            model_path,
            monitor='val_loss',
            save_best_only=True,
            save_weights_only=True,
            verbose=0
        ),
        callbacks.ReduceLROnPlateau(
            monitor='val_loss',
            min_lr=1e-7,
            factor=0.2,
            verbose=1
        )
    ]
def train_model(self, batch_size, epochs, path):
    filepath = "./traindata/checkpoints/weights-{epoch:02d}.hdf5"
    checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=1, save_best_only=True, mode='min')
    callbacks_list = [checkpoint]

    self.model.fit(self.x_data, self.y_data, batch_size=batch_size, epochs=epochs,
                   callbacks=callbacks_list)
    self.model.save_weights(path)