The following 50 code examples, drawn from open-source Python projects, illustrate how to use keras.callbacks.EarlyStopping().
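Before the project examples, here is a minimal, self-contained sketch of the typical usage pattern (the toy data, the two-layer model, and all parameter values below are illustrative assumptions, not taken from any of the projects listed): an EarlyStopping instance is passed in the callbacks list of model.fit(), and training stops once the monitored quantity has not improved for `patience` epochs.

import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.callbacks import EarlyStopping

# Toy data, purely for illustration.
X_train = np.random.random((1000, 20))
y_train = np.random.randint(2, size=(1000, 1))

model = Sequential([
    Dense(64, input_dim=20, activation='relu'),
    Dense(1, activation='sigmoid'),
])
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

# Stop training when val_loss has not improved for 3 consecutive epochs.
early_stopping = EarlyStopping(monitor='val_loss', patience=3, verbose=1)

model.fit(X_train, y_train,
          epochs=100,
          batch_size=32,
          validation_split=0.2,
          callbacks=[early_stopping])
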
def start_train(self):
    """
    Starts to Train the entire Model Based on set Parameters
    """
    # 1, Prep
    callback = [EarlyStopping(patience=self.Patience),
                ReduceLROnPlateau(patience=5, verbose=1),
                CSVLogger(filename=self.rnn_type + 'log.csv'),
                ModelCheckpoint(self.rnn_type + '_' + self.dataset + '.check',
                                save_best_only=True,
                                save_weights_only=True)]

    # 2, Train
    self.model.fit(x=[self.train[0], self.train[1]],
                   y=self.train[2],
                   batch_size=self.BatchSize,
                   epochs=self.MaxEpoch,
                   validation_data=([self.test[0], self.test[1]], self.test[2]),
                   callbacks=callback)

    # 3, Evaluate
    self.model.load_weights(self.rnn_type + '_' + self.dataset + '.check')  # revert to the best model
    self.evaluate_on_test()

def model(data, hidden_layers, hidden_neurons, output_file, validation_split=0.9):
    train_n = int(validation_split * len(data))
    batch_size = 50
    train_data = data[:train_n, :]
    val_data = data[train_n:, :]

    input_sh = Input(shape=(data.shape[1],))
    encoded = noise.GaussianNoise(0.2)(input_sh)
    for i in range(hidden_layers):
        encoded = Dense(hidden_neurons[i], activation='relu')(encoded)
        encoded = noise.GaussianNoise(0.2)(encoded)

    decoded = Dense(hidden_neurons[-2], activation='relu')(encoded)
    for j in range(hidden_layers - 3, -1, -1):
        decoded = Dense(hidden_neurons[j], activation='relu')(decoded)
    decoded = Dense(data.shape[1], activation='sigmoid')(decoded)

    autoencoder = Model(input=input_sh, output=decoded)
    autoencoder.compile(optimizer='adadelta', loss='mse')

    checkpointer = ModelCheckpoint(filepath='data/bestmodel' + output_file + ".hdf5",
                                   verbose=1, save_best_only=True)
    earlystopper = EarlyStopping(monitor='val_loss', patience=15, verbose=1)

    train_generator = DataGenerator(batch_size)
    train_generator.fit(train_data, train_data)
    val_generator = DataGenerator(batch_size)
    val_generator.fit(val_data, val_data)

    autoencoder.fit_generator(train_generator,
                              samples_per_epoch=len(train_data),
                              nb_epoch=100,
                              validation_data=val_generator,
                              nb_val_samples=len(val_data),
                              max_q_size=batch_size,
                              callbacks=[checkpointer, earlystopper])

    enco = Model(input=input_sh, output=encoded)
    enco.compile(optimizer='adadelta', loss='mse')
    reprsn = enco.predict(data)
    return reprsn

def train_multilabel_bts(lang_db, imdb, pretrained, max_iters=1000, loss_func='squared_hinge', box_method='random'):
    # Create callback_list.
    dir_path = osp.join('output', 'bts_ckpt', imdb.name)
    tensor_path = osp.join(dir_path, 'log_dir')
    if not osp.exists(dir_path):
        os.makedirs(dir_path)
    if not osp.exists(tensor_path):
        os.makedirs(tensor_path)

    ckpt_save = osp.join(dir_path, lang_db.name + '_multi_label_fixed_' + 'weights-{epoch:02d}.hdf5')
    checkpoint = ModelCheckpoint(ckpt_save, monitor='loss', verbose=1, save_best_only=True)
    early_stop = EarlyStopping(monitor='loss', min_delta=0, patience=3, verbose=0, mode='auto')
    tensorboard = TensorBoard(log_dir=dir_path, histogram_freq=2000, write_graph=True, write_images=False)
    callback_list = [checkpoint, early_stop, tensorboard]

    pretrained.fit_generator(load_multilabel_data(imdb, lang_db, pretrained, box_method),
                             steps_per_epoch=5000,
                             epochs=max_iters,
                             verbose=1,
                             callbacks=callback_list,
                             workers=1)

    pretrained.save(osp.join(dir_path, 'model_fixed' + imdb.name + '_' + lang_db.name
                             + '_ML_' + box_method + '_' + loss_func + '.hdf5'))

def validate(model, X, y, nb_epoch=25, batch_size=128, stop_early=True, folds=10,
             test_size=None, shuffle=True, verbose=True):
    early_stopping = EarlyStopping(monitor='val_loss', patience=3, verbose=0, mode='auto')
    total_score = []

    if test_size is None:
        if folds == 1:
            test_size = 0.25
        else:
            test_size = 1 - (1. / folds)

    kf = ShuffleSplit(n_splits=folds, test_size=test_size)
    for fold, (train_index, test_index) in enumerate(kf.split(X, y)):
        shuffle_weights(model)
        if fold > 0:
            print("FOLD:", fold)
            print("-" * 40)
        model.reset_states()
        callbacks = [early_stopping] if True else None
        hist = model.fit(X[train_index], y[train_index],
                         batch_size=batch_size,
                         shuffle=shuffle,
                         validation_data=(X[test_index], y[test_index]),
                         callbacks=[early_stopping],
                         verbose=verbose)
        total_score.append(hist.history["val_acc"][-1])

    return np.mean(total_score)

def test_EarlyStopping_reuse():
    patience = 3
    data = np.random.random((100, 1))
    labels = np.where(data > 0.5, 1, 0)
    model = Sequential((
        Dense(1, input_dim=1, activation='relu'),
        Dense(1, activation='sigmoid'),
    ))
    model.compile(optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
    stopper = callbacks.EarlyStopping(monitor='acc', patience=patience)
    weights = model.get_weights()

    hist = model.fit(data, labels, callbacks=[stopper])
    assert len(hist.epoch) >= patience

    # This should allow training to go for at least `patience` epochs
    model.set_weights(weights)
    hist = model.fit(data, labels, callbacks=[stopper])
    assert len(hist.epoch) >= patience

def finetuning_callbacks(checkpoint_path, patience, verbose):
    """ Callbacks for model training.

    # Arguments:
        checkpoint_path: Where weight checkpoints should be saved.
        patience: Number of epochs with no improvement after which
            training will be stopped.

    # Returns:
        Array with training callbacks that can be passed straight into
        model.fit() or similar.
    """
    cb_verbose = (verbose >= 2)
    checkpointer = ModelCheckpoint(monitor='val_loss', filepath=checkpoint_path,
                                   save_best_only=True, verbose=cb_verbose)
    earlystop = EarlyStopping(monitor='val_loss', patience=patience, verbose=cb_verbose)
    return [checkpointer, earlystop]

def init_callbacks(self, for_worker=False):
    """Prepares all keras callbacks to be used in training.
        Automatically attaches a History callback to the end of the callback list.
        If for_worker is True, leaves out callbacks that only make sense
        with validation enabled."""
    import keras.callbacks as cbks
    remove_for_worker = [cbks.EarlyStopping, cbks.ModelCheckpoint]
    if for_worker:
        for obj in remove_for_worker:
            self.callbacks_list = [c for c in self.callbacks_list
                                   if not isinstance(c, obj)]
    self.model.history = cbks.History()
    self.callbacks = cbks.CallbackList(self.callbacks_list + [self.model.history])

    # it's possible to callback a different model than self
    # (used by Sequential models)
    if hasattr(self.model, 'callback_model') and self.model.callback_model:
        self.callback_model = self.model.callback_model
    else:
        self.callback_model = self.model
    self.callbacks.set_model(self.callback_model)
    self.callback_model.stop_training = False

def train(self, X_train, V, seed):
    X_train = sequence.pad_sequences(X_train, maxlen=self.max_len)
    np.random.seed(seed)
    X_train = np.random.permutation(X_train)
    np.random.seed(seed)
    V = np.random.permutation(V)

    print("Train...CNN module")
    # history = self.model.fit({'input': X_train, 'output': V},
    #                          verbose=0, batch_size=self.batch_size, nb_epoch=self.nb_epoch,
    #                          shuffle=True, validation_split=0.1,
    #                          callbacks=[EarlyStopping(monitor='val_loss', patience=0)])
    history = self.model.fit(X_train, y=V, batch_size=self.batch_size, nb_epoch=self.nb_epoch,
                             shuffle=True, validation_split=0.1,
                             callbacks=[EarlyStopping(monitor='val_loss', patience=0)])

    cnn_loss_his = history.history['loss']
    cmp_cnn_loss = sorted(cnn_loss_his)[::-1]
    if cnn_loss_his != cmp_cnn_loss:
        self.nb_epoch = 1
    return history

def train_chrom_labeller(model, train_tuple, valid_tuple, save_weight_hd5):
    checkpointer = ModelCheckpoint(filepath=save_weight_hd5, verbose=1, save_best_only=True)
    earlystopper = EarlyStopping(monitor='val_loss', patience=3, verbose=1)

    history = model.fit(train_tuple[0], train_tuple[1],
                        batch_size=32, nb_epoch=150, shuffle=False,
                        validation_data=(valid_tuple[0], valid_tuple[1]),
                        callbacks=[checkpointer, earlystopper])

    plot_metric_history(history, weight_path_to_title(save_weight_hd5))

    return model


# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~ Training Data ~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

def fit(self, X_trains, y_train):
    X_train1, X_train2, X_train3 = X_trains
    main_target, X1_vid = y_train

    early_stopping = EarlyStopping(monitor='val_loss', patience=2)
    print(X_train1.shape)
    print(X1_vid.shape)
    print(main_target.shape)

    self.model.fit({'X1': X_train1, 'X2': X_train2, 'X3': X_train3},
                   {'main_output': main_target, 'aux_output': X1_vid},
                   batch_size=self.batch_size, nb_epoch=self.nb_epoch, verbose=1,
                   validation_data=([X_train1, X_train2, X_train3], y_train),
                   callbacks=[early_stopping])

    y_target = np.argmax(X1_vid, axis=1)
    y_predict = np.argmax(self.vision_model.predict(X_train1, verbose=0), axis=1)
    conf_mat = confusion_matrix(y_target, y_predict)

    print('Test accuracy:')
    n_correct = np.sum(np.diag(conf_mat))
    print('# correct:', n_correct, 'out of', len(y_target), ', acc=', float(n_correct) / len(y_target))

def LSTM(self, argsDict):
    self.paras.batch_size = argsDict["batch_size"]
    self.paras.model['dropout'] = argsDict['dropout']
    self.paras.model['activation'] = argsDict["activation"]
    self.paras.model['optimizer'] = argsDict["optimizer"]
    self.paras.model['learning_rate'] = argsDict["learning_rate"]
    print(self.paras.batch_size, self.paras.model['dropout'], self.paras.model['activation'],
          self.paras.model['optimizer'], self.paras.model['learning_rate'])

    model = self.lstm_model()
    model.fit(self.train_x, self.train_y,
              batch_size=self.paras.batch_size,
              epochs=self.paras.epoch,
              verbose=0,
              callbacks=[EarlyStopping(monitor='loss', patience=5)])

    score, mse = model.evaluate(self.test_x, self.test_y, verbose=0)
    y_pred = model.predict(self.test_x)
    reca = Recall_s(self.test_y, y_pred)
    return -reca

def fit(self, X_train, y_train, X_test, y_test, batch_size=50, nb_epoch=3):
    """
    :param X_train: each instance is a list of word index
    :param y_train:
    :return:
    """
    print(len(X_train), 'train sequences')
    print(len(X_test), 'test sequences')

    print("Pad sequences (samples x time)")
    X_train = sequence.pad_sequences(X_train, maxlen=self.maxlen)
    X_test = sequence.pad_sequences(X_test, maxlen=self.maxlen)
    print('X_train shape:', X_train.shape)
    print('X_test shape:', X_test.shape)
    y_train = expand_label(y_train)
    y_test = expand_label(y_test)

    # early stopping
    early_stop = EarlyStopping(monitor='val_loss', patience=2)
    self.model.fit({'input': X_train, 'output': y_train},
                   batch_size=batch_size, nb_epoch=nb_epoch, verbose=1,
                   validation_data=({'input': X_test, 'output': y_test}),
                   callbacks=[early_stop])

def get_callbacks(config_data, appendix=''):
    ret_callbacks = []
    model_stored = False
    callbacks = config_data['callbacks']

    if K._BACKEND == 'tensorflow':
        tensor_board = TensorBoard(log_dir=os.path.join('logging', config_data['tb_log_dir']),
                                   histogram_freq=10)
        ret_callbacks.append(tensor_board)

    for callback in callbacks:
        if callback['name'] == 'early_stopping':
            ret_callbacks.append(EarlyStopping(monitor=callback['monitor'],
                                               patience=callback['patience'],
                                               verbose=callback['verbose'],
                                               mode=callback['mode']))
        elif callback['name'] == 'model_checkpoit':
            model_stored = True
            path = config_data['output_path']
            basename = config_data['output_basename']
            base_path = os.path.join(path, basename)
            opath = os.path.join(base_path, 'best_model{}.h5'.format(appendix))
            save_best = bool(callback['save_best_only'])
            ret_callbacks.append(ModelCheckpoint(filepath=opath,
                                                 verbose=callback['verbose'],
                                                 save_best_only=save_best,
                                                 monitor=callback['monitor'],
                                                 mode=callback['mode']))
    return ret_callbacks, model_stored

def fit(self, X, y, eval_set=None, class_weight=None, show_accuracy=True):
    if self.loss == 'categorical_crossentropy':
        y = np_utils.to_categorical(y)
    if eval_set != None and self.loss == 'categorical_crossentropy':
        eval_set = (eval_set[0], np_utils.to_categorical(eval_set[1]))

    self.model = self._build_model(self.input_dim, self.output_dim, self.hidden_units,
                                   self.activation, self.dropout, self.loss,
                                   self.optimizer, self.class_mode)

    if eval_set != None:
        early_stopping = EarlyStopping(monitor='val_loss', patience=self.esr, verbose=1, mode='min')
        logs = self.model.fit(X, y, self.batch_size, self.nb_epoch, self.verbose,
                              validation_data=eval_set, callbacks=[early_stopping],
                              show_accuracy=True, shuffle=True)
    else:
        logs = self.model.fit(X, y, self.batch_size, self.nb_epoch, self.verbose,
                              show_accuracy=True, shuffle=True)
    return logs

def train_sequential(model, X, y, where_to_save, fit_params=None, monitor='val_acc'):
    # TODO: DOCUMENT once thoroughly tested
    # Watch out: where_to_save might be inside fit_params
    if fit_params is None:
        fit_params = {
            "batch_size": 32,
            "nb_epoch": 45,
            "verbose": True,
            "validation_split": 0.15,
            "show_accuracy": True,
            "callbacks": [EarlyStopping(verbose=True, patience=5, monitor=monitor),
                          ModelCheckpoint(where_to_save, monitor=monitor, verbose=True,
                                          save_best_only=True)]
        }
    print('Fitting! Hit CTRL-C to stop early...')
    history = "Nothing to show"
    try:
        history = model.fit(X, y, **fit_params)
    except KeyboardInterrupt:
        print("Training stopped early!")
        history = model.history
    return history

def unet_fit(name, start_t, end_t, start_v, end_v, check_name=None):
    t = time.time()
    callbacks = [EarlyStopping(monitor='val_loss', patience=15, verbose=1),
                 ModelCheckpoint('/home/w/DS_Projects/Kaggle/DS Bowl 2017/Scripts/LUNA/CNN/Checkpoints/{}.h5'.format(name),
                                 monitor='val_loss', verbose=0, save_best_only=True)]

    if check_name is not None:
        check_model = '/home/w/DS_Projects/Kaggle/DS Bowl 2017/Scripts/LUNA/CNN/Checkpoints/{}.h5'.format(check_name)
        model = load_model(check_model,
                           custom_objects={'dice_coef_loss': dice_coef_loss, 'dice_coef': dice_coef})
    else:
        model = unet_model()

    model.fit_generator(generate_train(start_t, end_t), nb_epoch=150, verbose=1,
                        validation_data=generate_val(start_v, end_v), callbacks=callbacks,
                        samples_per_epoch=551, nb_val_samples=50)
    return


# In[5]:

def cnn3d_genfit(name, nn_model, epochs, start_t, end_t, start_v, end_v, nb_train, nb_val, check_name=None):
    callbacks = [EarlyStopping(monitor='val_loss', patience=15, verbose=1),
                 ModelCheckpoint('/home/w/DS_Projects/Kaggle/DS Bowl 2017/Scripts/LUNA/CNN/Checkpoints/{}.h5'.format(name),
                                 monitor='val_loss', verbose=0, save_best_only=True)]

    if check_name is not None:
        check_model = '/home/w/DS_Projects/Kaggle/DS Bowl 2017/Scripts/LUNA/CNN/Checkpoints/{}.h5'.format(check_name)
        model = load_model(check_model)
    else:
        model = nn_model

    model.fit_generator(generate_train(start_t, end_t), nb_epoch=epochs, verbose=1,
                        validation_data=generate_val(start_v, end_v), callbacks=callbacks,
                        samples_per_epoch=nb_train, nb_val_samples=nb_val)
    return

def cnn_genfit(name, batch_size, cnn, samples_tr, samples_val, start_tr, end_tr, start_val, end_val):
    callbacks = [EarlyStopping(monitor='val_loss', patience=3, verbose=1),
                 ModelCheckpoint('/home/w/DS_Projects/Kaggle/DS Bowl 2017/Scripts/LUNA/CNN/Checkpoints/{}.h5'.format(name),
                                 monitor='val_loss', verbose=0, save_best_only=True)]

    model = cnn()
    model.fit_generator(generate_train(start_tr, end_tr, batch_size), nb_epoch=25, verbose=1,
                        callbacks=callbacks, samples_per_epoch=samples_tr,
                        validation_data=generate_val(start_val, end_val, batch_size),
                        nb_val_samples=samples_val)
    return


#samplestr = check_shapes(0, 1398)
#samplesval = check_shapes(1398, 1594)

def fit_cnn1(self, X33_train, Y_train, X33_unif_train, Y_unif_train):
    # Create temp cnn with input shape=(4,33,33,)
    input33 = Input(shape=(4, 33, 33))
    output_cnn = self.one_block_model(input33)
    output_cnn = Reshape((5,))(output_cnn)
    # Cnn compiling
    temp_cnn = Model(inputs=input33, outputs=output_cnn)
    sgd = SGD(lr=self.learning_rate, momentum=self.momentum_rate, decay=self.decay_rate, nesterov=False)
    temp_cnn.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
    # Stop the training if the monitor function doesn't change after patience epochs
    earlystopping = EarlyStopping(monitor='val_loss', patience=2, verbose=1, mode='auto')
    # Save model after each epoch to check/bm_epoch#-val_loss
    checkpointer = ModelCheckpoint(filepath="/home/ixb3/Scrivania/check/bm_{epoch:02d}-{val_loss:.2f}.hdf5", verbose=1)
    # First-phase training with a uniformly distributed training set
    temp_cnn.fit(x=X33_train, y=Y_train, batch_size=self.batch_size, epochs=self.nb_epoch,
                 callbacks=[earlystopping, checkpointer], validation_split=0.3, verbose=1)
    # Fix all the layers of the temporary cnn except the output layer for the second phase
    temp_cnn = self.freeze_model(temp_cnn, freeze_output=False)
    # Second-phase training of the output layer with a training set following the real distribution
    temp_cnn.fit(x=X33_unif_train, y=Y_unif_train, batch_size=self.batch_size, epochs=self.nb_epoch,
                 callbacks=[earlystopping, checkpointer], validation_split=0.3, verbose=1)
    # Set the weights of the first cnn to the trained weights of the temporary cnn
    self.cnn1.set_weights(temp_cnn.get_weights())

def train_model(model):
    opt = 'rmsprop'

    model_checkpoint = ModelCheckpoint(
        filepath=BEST_MODEL_PATH,
        monitor='val_acc',
        verbose=0,
        save_best_only=True,
        mode='auto'
    )

    overfitting_stopper = EarlyStopping(
        monitor='val_acc',
        min_delta=0,
        patience=5,
        verbose=1,
        mode='auto'
    )

    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])

    model_history = model.fit(X_train, y_train,
                              batch_size=64,
                              nb_epoch=50,
                              #shuffle=True,
                              validation_split=0.2,
                              #verbose=2,
                              callbacks=[overfitting_stopper, model_checkpoint])

def BiGRU(X_train, y_train, X_test, y_test, gru_units, dense_units, input_shape,
          batch_size, epochs, drop_out, patience):
    model = Sequential()
    reg = L1L2(l1=0.2, l2=0.2)

    model.add(Bidirectional(GRU(units=gru_units, dropout=drop_out, activation='relu',
                                recurrent_regularizer=reg, return_sequences=True),
                            input_shape=input_shape, merge_mode="concat"))
    model.add(BatchNormalization())
    model.add(TimeDistributed(Dense(dense_units, activation='relu')))
    model.add(BatchNormalization())
    model.add(Bidirectional(GRU(units=gru_units, dropout=drop_out, activation='relu',
                                recurrent_regularizer=reg, return_sequences=True),
                            merge_mode="concat"))
    model.add(BatchNormalization())
    model.add(Dense(units=1))
    model.add(GlobalAveragePooling1D())
    print(model.summary())

    early_stopping = EarlyStopping(monitor="val_loss", patience=patience)
    model.compile(loss='mse', optimizer='adam')

    history_callback = model.fit(X_train, y_train, batch_size=batch_size, epochs=epochs,
                                 verbose=2, callbacks=[early_stopping],
                                 validation_data=[X_test, y_test], shuffle=True)

    return model, history_callback

def build_keras_fit_callbacks(model_path):
    return [
        callbacks.EarlyStopping(
            monitor='val_loss',
            patience=20
            #verbose=1
        ),
        callbacks.ModelCheckpoint(
            model_path,
            monitor='val_loss',
            save_best_only=True,
            save_weights_only=True,
            verbose=0
        ),
        callbacks.ReduceLROnPlateau(
            monitor='val_loss',
            min_lr=1e-7,
            factor=0.2,
            verbose=0
        )
    ]

def build_keras_fit_callbacks(model_path):
    from keras import callbacks
    return [
        callbacks.EarlyStopping(
            monitor='val_loss',
            patience=20
            #verbose=1
        ),
        callbacks.ModelCheckpoint(
            model_path,
            monitor='val_loss',
            save_best_only=True,
            save_weights_only=True,
            verbose=0
        ),
        callbacks.ReduceLROnPlateau(
            monitor='val_loss',
            min_lr=1e-7,
            factor=0.2,
            verbose=0
        )
    ]

def keras_fit_callbacks(model_path):
    from keras import callbacks
    return [
        callbacks.EarlyStopping(
            monitor='val_loss',
            patience=20
            #verbose=1
        ),
        callbacks.ModelCheckpoint(
            model_path,
            monitor='val_loss',
            save_best_only=True,
            save_weights_only=True,
            verbose=0
        ),
        callbacks.ReduceLROnPlateau(
            monitor='val_loss',
            min_lr=1e-7,
            factor=0.2,
            verbose=1
        )
    ]

def fit(self, X_train, y_train, X_val, y_val, nb_classes=None, batch_size=10, nb_epoch=20, verbose=0):
    model = self.model

    if nb_classes is None:
        nb_classes = max(set(y_train)) + 1
    Y_train = np_utils.to_categorical(y_train, nb_classes)
    Y_val = np_utils.to_categorical(y_val, nb_classes)

    model.reset_states()
    earlyStopping = callbacks.EarlyStopping(monitor='val_loss', patience=3,
                                            verbose=verbose, mode='auto')
    X_train, X_val = self.X_reshape(X_train, X_val)
    history = model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
                        verbose=verbose, validation_data=(X_val, Y_val),
                        callbacks=[earlyStopping])

    self.nb_classes = nb_classes
    self.history = history

def fit(self, X_train, Y_train, X_val, Y_val, batch_size=10, nb_epoch=20, verbose=0):
    model = self.model

    # if nb_classes is None:
    #     nb_classes = max(set(y_train)) + 1
    # Y_train = np_utils.to_categorical(y_train, nb_classes)
    # Y_val = np_utils.to_categorical(y_val, nb_classes)

    model.reset_states()
    earlyStopping = callbacks.EarlyStopping(monitor='val_loss', patience=3,
                                            verbose=verbose, mode='auto')
    X_train, X_val = self.X_reshape(X_train, X_val)
    history = model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
                        verbose=verbose, validation_data=(X_val, Y_val),
                        callbacks=[earlyStopping])

    # self.nb_classes = nb_classes
    self.history = history

def fit(self, X_train, Y_train, X_val, Y_val, batch_size=10, nb_epoch=20, verbose=0):
    model = self.model

    # if nb_classes is None:
    #     nb_classes = max(set(y_train)) + 1
    # Y_train = np_utils.to_categorical(y_train, nb_classes)
    # Y_val = np_utils.to_categorical(y_val, nb_classes)

    model.reset_states()
    earlyStopping = callbacks.EarlyStopping(monitor='val_loss', patience=3,
                                            verbose=verbose, mode='auto')
    X_train, X_val = self.X_reshape(X_train, X_val)
    history = model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
                        verbose=verbose, validation_data=(X_val, Y_val),
                        callbacks=[earlyStopping])

    # self.nb_classes = nb_classes
    self.history = history