The following code examples, extracted from open source Python projects, illustrate how to use keras.callbacks.Callback().
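Before the project snippets, here is a minimal sketch of our own (not taken from any of the projects below) showing the pattern they all follow: subclass Callback, override one or more of the on_* hooks, and pass an instance to model.fit().

import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.callbacks import Callback

class LossHistory(Callback):
    """Collect the training loss reported at the end of each batch."""
    def on_train_begin(self, logs=None):
        self.losses = []

    def on_batch_end(self, batch, logs=None):
        self.losses.append((logs or {}).get('loss'))

model = Sequential([Dense(1, input_dim=4)])
model.compile(optimizer='sgd', loss='mse')
history = LossHistory()
model.fit(np.random.rand(32, 4), np.random.rand(32, 1),
          epochs=2, callbacks=[history], verbose=0)
print(history.losses[:3])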
def __init__(self, monitor='val_loss', patience=0, verbose=0, mode='auto'):
    super(Callback, self).__init__()
    self.monitor = monitor
    self.patience = patience
    self.verbose = verbose
    self.wait = 0
    self.best_epoch = 0

    if mode == 'min':
        self.monitor_op = np.less
        self.best = np.Inf
    elif mode == 'max':
        self.monitor_op = np.greater
        self.best = -np.Inf
    else:
        if 'acc' in self.monitor:
            self.monitor_op = np.greater
            self.best = -np.Inf
        else:
            self.monitor_op = np.less
            self.best = np.Inf
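Only the constructor is shown above. A plausible on_epoch_end to pair with it, a hedged sketch assuming standard early-stopping semantics rather than the project's actual code, would be:

# Hypothetical completion (not from the original project): standard
# early stopping driven by the state set up in __init__ above.
def on_epoch_end(self, epoch, logs=None):
    current = (logs or {}).get(self.monitor)
    if current is None:
        return
    if self.monitor_op(current, self.best):  # metric improved
        self.best = current
        self.best_epoch = epoch
        self.wait = 0
    elif self.wait >= self.patience:         # ran out of patience
        if self.verbose > 0:
            print('Epoch %d: early stopping' % epoch)
        self.model.stop_training = True
    else:
        self.wait += 1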
def __init__(self, monitor='val_loss', cut_ratio=0.5, patience=2,
             scheduled_start_epoch=1, scheduled_cut_ratio=1.):
    """
    Args:
        monitor: quantity to be monitored.
        cut_ratio: cut the learning rate by this percent.
        patience: number of epochs with no improvement after which
            training will be stopped.
        scheduled_start_epoch: from which epoch to do scheduled learning
            rate discount.
        scheduled_cut_ratio: learning rate discount ratio.
    """
    super(Callback, self).__init__()
    self.monitor = monitor
    self.patience = patience
    self.best = np.Inf
    self.wait = 0
    self.cut_ratio = cut_ratio
    self.monitor_decrease = False
    self.scheduled_start_epoch = scheduled_start_epoch
    self.scheduled_cut_ratio = scheduled_cut_ratio
def __init__(self, filepath, epoch_interval, verbose=0):
    """
    In:
        filepath - formattable filepath; possibilities:
            * weights.{epoch:02d}
            * weights.{era:02d}
        epoch_interval - number of epochs that must be passed from the
            previous saving
        verbose - if nonzero then print out information on stdout;
            by default 0
    """
    super(KerasCallback, self).__init__()
    self.filepath = filepath
    self.epoch_interval = epoch_interval
    self.verbose = verbose
    self.era = 0
def __init__(self, patience=0, reduce_rate=0.5, reduce_nb=10,
             is_early_stopping=True, verbose=1):
    """
    In:
        patience - number of beginning epochs without reduction;
            by default 0
        reduce_rate - multiplicative rate reducer; by default 0.5
        reduce_nb - maximal number of reductions performed; by default 10
        is_early_stopping - if true then early stopping is applied when
            reduce_nb is reached; by default True
        verbose - verbosity level; by default 1
    """
    super(KerasCallback, self).__init__()
    self.patience = patience
    self.wait = 0
    self.best_score = -1.
    self.reduce_rate = reduce_rate
    self.current_reduce_nb = 0
    self.reduce_nb = reduce_nb
    self.is_early_stopping = is_early_stopping
    self.verbose = verbose
    self.epsilon = 0.1e-10
def __init__(self, patience=0, reduce_rate=0.5, reduce_nb=10, verbose=1):
    """
    In:
        patience - number of epochs in stagnation; by default 0
        reduce_rate - multiplicative rate reducer; by default 0.5
        reduce_nb - maximal number of reductions performed; by default 10
        verbose - verbosity level; by default 1
    """
    super(KerasCallback, self).__init__()
    self.patience = patience
    self.wait = 0
    self.best_score = -1.
    self.reduce_rate = reduce_rate
    self.current_reduce_nb = 0
    self.reduce_nb = reduce_nb
    self.is_early_stopping = False
    self.verbose = verbose
    self.epsilon = 0.1e-10
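Neither of the two learning-rate-reducer constructors above includes its epoch hook. One plausible shape for it, a sketch assuming the monitored score should increase and ignoring the early-stopping branch, cuts the learning rate with the Keras backend whenever the score stagnates:

# Hypothetical completion (not from the original project): reduce the
# optimizer's learning rate when the monitored score stops improving.
from keras import backend as K

def on_epoch_end(self, epoch, logs=None):
    score = (logs or {}).get('val_acc', 0.)
    if score > self.best_score + self.epsilon:
        self.best_score = score
        self.wait = 0
    elif self.wait >= self.patience and self.current_reduce_nb < self.reduce_nb:
        lr = K.get_value(self.model.optimizer.lr)
        K.set_value(self.model.optimizer.lr, lr * self.reduce_rate)
        self.current_reduce_nb += 1
        self.wait = 0
    else:
        self.wait += 1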
def create_callbacks(self, callback: Callable[[], None],
                     tensor_board_log_directory: Path, net_directory: Path,
                     callback_step: int = 1, save_step: int = 1) -> List[Callback]:
    class CustomCallback(Callback):
        def on_epoch_end(self_callback, epoch, logs=()):
            if epoch % callback_step == 0:
                callback()

            if epoch % save_step == 0 and epoch > 0:
                mkdir(net_directory)
                self.predictive_net.save_weights(
                    str(net_directory / self.model_file_name(epoch)))

    tensorboard_if_running_tensorflow = [TensorBoard(
        log_dir=str(tensor_board_log_directory), write_images=True)] \
        if backend.backend() == 'tensorflow' else []
    return tensorboard_if_running_tensorflow + [CustomCallback()]
def __init__(self, filepath, validation_data=(), interval=1, mymil=False):
    super(Callback, self).__init__()
    self.interval = interval
    self.auc = 0
    self.X_val, self.y_val = validation_data
    self.filepath = filepath
    self.mymil = mymil
def __init__(self, filepath, validation_data=(), interval=1, mymil=False):
    super(Callback, self).__init__()
    self.interval = interval
    self.prec = 0
    self.X_val, self.y_val = validation_data
    self.filepath = filepath
    self.mymil = mymil
def __init__(self, filepath, validation_data=(), interval=1, mymil=False):
    super(Callback, self).__init__()
    self.interval = interval
    self.filepath = filepath
    self.reca = 0
    self.X_val, self.y_val = validation_data
    self.mymil = mymil
def __init__(self, filepath, validation_data=(), interval=1, mymil=False):
    super(Callback, self).__init__()
    self.interval = interval
    self.filepath = filepath
    self.f1 = 0
    self.X_val, self.y_val = validation_data
    self.mymil = mymil
def __init__(self, filepath, validation_data=(), interval=1, mymil=False):
    super(Callback, self).__init__()
    self.interval = interval
    self.filepath = filepath
    self.acc = 0
    self.X_val, self.y_val = validation_data
    self.mymil = mymil
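The five constructors above differ only in which metric attribute they track (auc, prec, reca, f1, acc); the matching epoch hooks are not shown on this page. A hedged sketch of the kind of on_epoch_end that would pair with the AUC variant, assuming scikit-learn is available, scores the held-out data every interval epochs and checkpoints the weights on improvement:

# Hypothetical on_epoch_end for the AUC variant (the original is not
# shown here): evaluate periodically and save the best weights.
from sklearn.metrics import roc_auc_score

def on_epoch_end(self, epoch, logs=None):
    if epoch % self.interval != 0:
        return
    y_pred = self.model.predict(self.X_val, verbose=0)
    score = roc_auc_score(self.y_val, y_pred)
    if score > self.auc:
        self.auc = score
        self.model.save_weights(self.filepath)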
def __init__(self, filename):
    super(Callback, self).__init__()
    self.filename = filename
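A constructor this small usually pairs with hooks that append to the stored file. One assumed completion, a minimal JSON-lines logger of our own rather than the project's code:

# Hypothetical completion: append each epoch's logs to the file.
import json

def on_epoch_end(self, epoch, logs=None):
    record = {'epoch': epoch}
    record.update({k: float(v) for k, v in (logs or {}).items()})
    with open(self.filename, 'a') as f:
        f.write(json.dumps(record) + '\n')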
def train(self, dir_name: str, enable_checkpoints: bool = False,
          checkpoint: str = None) -> None:
    """
    Train the model.

    :param dir_name: directory with the model data.
    :param enable_checkpoints: whether to use checkpoints.
    :param checkpoint: checkpoint name.
    """
    # Prepare the data.
    x, y = self.load_dict()
    x, y = self.prepare_data(x, y)
    # Split into samples.
    x_train, x_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=42)
    x_test, x_val, y_test, y_val = train_test_split(x_val, y_val, test_size=0.5, random_state=42)
    # Build the list of callbacks.
    callbacks = [EarlyStopping(monitor='val_acc', patience=2)]  # type: List[Callback]
    if enable_checkpoints:
        checkpoint_name = os.path.join(dir_name, "{epoch:02d}-{val_loss:.2f}.hdf5")
        callbacks.append(ModelCheckpoint(checkpoint_name, monitor='val_loss'))
    if checkpoint is not None:
        self.load(checkpoint)
    self.model.fit(x_train, y_train, verbose=1, epochs=60, batch_size=self.batch_size,
                   validation_data=(x_val, y_val), callbacks=callbacks)
    # Compute accuracy and word error rate on the test set.
    accuracy = self.model.evaluate(x_test, y_test)[1]
    wer = self.evaluate_wer(x_test, y_test)[0]
    # One more training pass over all the data.
    self.model.fit(x, y, verbose=1, epochs=1, batch_size=self.batch_size)
    # Save the model.
    filename = "g2p_{language}_maxlen{maxlen}_B{rnn}{units1}_B{rnn}{units2}_dropout{dropout}_acc{acc}_wer{wer}.h5"
    filename = filename.format(language=self.language, rnn=self.rnn.__name__,
                               units1=self.units1, units2=self.units2,
                               dropout=self.dropout, acc=int(accuracy * 1000),
                               wer=int(wer * 1000), maxlen=self.word_max_length)
    self.model.save(os.path.join(dir_name, filename))
def train(self, dir_name: str, enable_checkpoints: bool = False) -> None:
    """
    Train the network.

    :param dir_name: directory where all the model files will be stored.
    :param enable_checkpoints: whether to use checkpoints.
    """
    # Prepare the data.
    x, y = self.__load_dict()
    x, y = self.__prepare_data(x, y)
    # Split into samples.
    x_train, x_val, y_train, y_val = train_test_split(x, y, test_size=0.2, random_state=42)
    x_test, x_val, y_test, y_val = train_test_split(x_val, y_val, test_size=0.5, random_state=42)
    # Build the list of callbacks.
    callbacks = [EarlyStopping(monitor='val_acc', patience=3)]  # type: List[Callback]
    if enable_checkpoints:
        checkpoint_name = os.path.join(dir_name, "checkpoint.hdf5")
        callbacks.append(ModelCheckpoint(checkpoint_name, monitor='val_loss'))
    self.model.fit(x_train, y_train, verbose=1, epochs=200,
                   validation_data=(x_val, y_val), callbacks=callbacks,
                   batch_size=self.batch_size)
    # Compute accuracy on the test set.
    accuracy = self.model.evaluate(x_test, y_test)[1]
    # Compute WER on the test set.
    wer = self.__evaluate_wer(x_test, y_test)[0]
    # One more training pass over all the data.
    self.model.fit(x, y, verbose=1, epochs=1, batch_size=self.batch_size)
    # Save the model.
    filename = "stress_{language}_{rnn}{units}_dropout{dropout}_acc{acc}_wer{wer}.h5"
    filename = filename.format(language=self.language, rnn=self.rnn.__name__,
                               units=self.units, dropout=self.dropout,
                               acc=int(accuracy * 100), wer=int(wer * 100))
    self.model.save(os.path.join(dir_name, filename))
def train(self, dir_name: str, enable_checkpoints: bool = False) -> None:
    """
    Train the network.

    :param dir_name: directory where all the model files will be stored.
    :param enable_checkpoints: whether to use checkpoints.
    """
    # Prepare the data.
    x, y = self.__load_dict()
    x, y = self.__prepare_data(x, y)
    # Split into samples.
    x_train, x_val, y_train, y_val = train_test_split(x, y, test_size=0.1, random_state=42)
    # Build the list of callbacks.
    callbacks = [EarlyStopping(monitor='val_acc', patience=3)]  # type: List[Callback]
    if enable_checkpoints:
        checkpoint_name = os.path.join(dir_name, "{epoch:02d}-{val_loss:.2f}.hdf5")
        callbacks.append(ModelCheckpoint(checkpoint_name, monitor='val_loss'))
    self.model.fit(x_train, y_train, verbose=1, epochs=200,
                   validation_data=(x_val, y_val), callbacks=callbacks,
                   batch_size=self.batch_size)
    # Compute accuracy on the validation set.
    accuracy = self.model.evaluate(x_val, y_val)[1]
    # Compute WER on all the data.
    wer = self.__evaluate_wer(x, y)[0]
    # One more training pass over all the data.
    self.model.fit(x, y, verbose=1, epochs=1, batch_size=self.batch_size)
    # Save the model.
    filename = "stress_{language}_{rnn}{units}_dropout{dropout}_acc{acc}_wer{wer}.h5"
    filename = filename.format(language=self.language, rnn=self.rnn.__name__,
                               units=self.units, dropout=self.dropout,
                               acc=int(accuracy * 100), wer=int(wer * 100))
    self.model.save(os.path.join(dir_name, filename))
def fit(self, dataset, settings):
    X_trn, y_trn, X_val, y_val, X_tst, y_tst = dataset
    y_trn = np_utils.to_categorical(y_trn, 10 if settings.dataset != 'cifar100' else 100)
    y_val = np_utils.to_categorical(y_val, 10 if settings.dataset != 'cifar100' else 100)
    y_tst = np_utils.to_categorical(y_tst, 10 if settings.dataset != 'cifar100' else 100) if len(y_tst) > 0 else []

    settings.lrnparam = (settings.lrnparam[:1] + settings.lrnparam[2:])
    self.model.compile(loss='categorical_crossentropy',
                       optimizer=eval(settings.lrnalg)(*settings.lrnparam),
                       metrics=["accuracy"])

    class PerEpochTest(Callback):
        def on_epoch_begin(self, epoch, logs={}):
            self.tic = time.time()

        def on_epoch_end(self, epoch, logs={}):
            self.model.history.history['time'] = [] if 'time' not in self.model.history.history else self.model.history.history['time']
            self.model.history.history['time'] += [time.time() - self.tic]
            self.model.history.history['tst_acc'] = [] if 'tst_acc' not in self.model.history.history else self.model.history.history['tst_acc']
            self.model.history.history['tst_acc'] += [self.model.evaluate(X_tst, y_tst, batch_size=settings.batchsize, verbose=0)[1]]

    aug = augment(settings.dataset) if settings.augment else None
    arg = {'nb_epoch': settings.epoch, 'validation_data': (X_val, y_val),
           'callbacks': [PerEpochTest()] if len(y_tst) > 0 else [],
           'verbose': settings.verbose}

    if aug is None:
        self.model.fit(X_trn, y_trn, batch_size=settings.batchsize, **arg)
    else:
        self.model.fit_generator(aug.flow(X_trn, y_trn, batch_size=settings.batchsize),
                                 samples_per_epoch=len(X_trn), nb_worker=4,
                                 pickle_safe=True, **arg)

    return self.model.history.history
def pretrain(self, x, y=None, optimizer='adam', epochs=200, batch_size=256, save_dir='results/temp'):
    print('...Pretraining...')
    self.autoencoder.compile(optimizer=optimizer, loss='mse')

    csv_logger = callbacks.CSVLogger(save_dir + '/pretrain_log.csv')
    cb = [csv_logger]
    if y is not None:
        class PrintACC(callbacks.Callback):
            def __init__(self, x, y):
                self.x = x
                self.y = y
                super(PrintACC, self).__init__()

            def on_epoch_end(self, epoch, logs=None):
                if epoch % int(epochs / 10) != 0:
                    return
                feature_model = Model(self.model.input,
                                      self.model.get_layer(
                                          'encoder_%d' % (int(len(self.model.layers) / 2) - 1)).output)
                features = feature_model.predict(self.x)
                km = KMeans(n_clusters=len(np.unique(self.y)), n_init=20, n_jobs=4)
                y_pred = km.fit_predict(features)
                print(' ' * 8 + '|==> acc: %.4f, nmi: %.4f <==|'
                      % (metrics.acc(self.y, y_pred), metrics.nmi(self.y, y_pred)))

        cb.append(PrintACC(x, y))

    # begin pretraining
    t0 = time()
    self.autoencoder.fit(x, x, batch_size=batch_size, epochs=epochs, callbacks=cb)
    print('Pretraining time: ', time() - t0)
    self.autoencoder.save_weights(save_dir + '/ae_weights.h5')
    print('Pretrained weights are saved to %s/ae_weights.h5' % save_dir)
    self.pretrained = True
def __init__(self):
    super(Callback, self).__init__()
    self.stopped = False
def __init__(self, output_dir, num_identities, batch_size=32,
             use_yale=False, use_jaffe=False):
    """
    Constructor for a GenerateIntermediate object.

    Args:
        output_dir (str): Directory to save intermediate results in.
        num_identities (int): Number of identities in the training set.
    Args: (optional)
        batch_size (int): Batch size to use when generating images.
    """
    super(Callback, self).__init__()

    self.output_dir = output_dir
    self.num_identities = num_identities
    self.batch_size = batch_size
    self.use_yale = use_yale
    self.use_jaffe = use_jaffe

    self.parameters = dict()

    # Sweep through identities
    self.parameters['identity'] = np.eye(num_identities)

    if use_yale:
        # Use pose 0, lighting at 0deg azimuth and elevation
        self.parameters['pose'] = np.zeros((num_identities, NUM_YALE_POSES))
        self.parameters['lighting'] = np.zeros((num_identities, 4))
        for i in range(0, num_identities):
            self.parameters['pose'][i, 0] = 0
            self.parameters['lighting'][i, 1] = 1
            self.parameters['lighting'][i, 3] = 1
    else:
        # Make all have neutral expressions, front-facing
        self.parameters['emotion'] = np.empty((num_identities, Emotion.length()))
        self.parameters['orientation'] = np.zeros((num_identities, 2))
        for i in range(0, num_identities):
            self.parameters['emotion'][i, :] = Emotion.neutral
            self.parameters['orientation'][i, 1] = 1
def extract_features(self, img_data):
    features = self.cnn.evaluate(img_data)
    return features

# class ROCCallback(Callback):
#
#     def __init__(self, training_data, validation_data):
#         super(Roc).__init__
#         self.x = training_data[0]
#         self.y = training_data[1]
#         self.x_val = validation_data[0]
#         self.y_val = validation_data[1]
#
#     def on_train_begin(self, logs={}):
#         return
#
#     def on_train_end(self, logs={}):
#         return
#
#     def on_epoch_begin(self, epoch, logs={}):
#         return
#
#     def on_epoch_end(self, epoch, logs={}):
#         y_pred = self.model.predict(self.x)
#         roc = roc_auc_score(self.y, y_pred)
#
#         y_pred_val = self.model.predict(self.x_val)
#         roc_val = roc_auc_score(self.y_val, y_pred_val)
#
#         print('\rroc-auc: %s - roc-auc_val: %s'
#               % (str(round(roc, 4)), str(round(roc_val, 4))), end=100 * ' ' + '\n')
#         return
#
#     def on_batch_begin(self, batch, logs={}):
#         return
#
#     def on_batch_end(self, batch, logs={}):
#         return
def __init__(self, model, chkp_dir, nb_step_chkp=100,
             max_to_keep=10, keep_checkpoint_every_n_hours=1):
    super(Callback, self).__init__()
    if K._BACKEND == 'tensorflow':
        import tensorflow as tf
        self.saver = tf.train.Saver(var_list=None,
                                    max_to_keep=max_to_keep,
                                    keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours)
    else:
        self.saver = None

    self.model = model
    self.archi = model.to_json()
    self.chkp_dir = chkp_dir
    if not os.path.isdir(self.chkp_dir):
        os.makedirs(self.chkp_dir)
    self.global_step = 0
    self.nb_step_chkp = nb_step_chkp
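Only the constructor is shown; a plausible on_batch_end to go with it, a sketch of ours that assumes the TensorFlow backend and Keras's K.get_session(), would save a checkpoint every nb_step_chkp steps:

# Hypothetical completion (not from the original project): periodic
# tf.train.Saver checkpointing driven by the step counter above.
def on_batch_end(self, batch, logs=None):
    self.global_step += 1
    if self.saver is not None and self.global_step % self.nb_step_chkp == 0:
        self.saver.save(K.get_session(),
                        os.path.join(self.chkp_dir, 'model.ckpt'),
                        global_step=self.global_step)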
def __init__(self, n_classes, savepath, train_metrics, valid_metrics,
             best_metric, best_type, verbose=False, *args):
    super(Callback, self).__init__()
    # Save input parameters
    self.n_classes = n_classes
    self.savepath = savepath
    self.verbose = verbose
    self.train_metrics = train_metrics
    self.valid_metrics = valid_metrics
    self.best_metric = best_metric
    self.best_type = best_type
def __init__(self, n_classes, *args):
    super(Callback, self).__init__()
    # Save input parameters
    self.n_classes = n_classes
    self.I = np.zeros(self.n_classes)
    self.U = np.zeros(self.n_classes)
    self.jacc_percl = np.zeros(self.n_classes)
    self.val_I = np.zeros(self.n_classes)
    self.val_U = np.zeros(self.n_classes)
    self.val_jacc_percl = np.zeros(self.n_classes)

    self.remove_metrics = []
    for i in range(n_classes):
        self.remove_metrics.append('I' + str(i))
        self.remove_metrics.append('U' + str(i))
        self.remove_metrics.append('val_I' + str(i))
        self.remove_metrics.append('val_U' + str(i))

    self.add_metrics = []
    self.add_metrics.append('jaccard')
    self.add_metrics.append('val_jaccard')
    for i in range(n_classes):
        self.add_metrics.append(str(i) + '_jacc')
        self.add_metrics.append(str(i) + '_val_jacc')

    setattr(ProgbarLogger, 'add_metrics', self.add_metrics)
    setattr(ProgbarLogger, 'remove_metrics', self.remove_metrics)
    setattr(ProgbarLogger, '_set_params', progbar__set_params)
    setattr(ProgbarLogger, 'on_batch_end', progbar_on_batch_end)
    setattr(ProgbarLogger, 'on_epoch_end', progbar_on_epoch_end)
def __init__(self, n_classes, void_label, save_path, generator, epoch_length,
             color_map, classes, tag, n_legend_rows=1, *args):
    super(Callback, self).__init__()
    self.n_classes = n_classes
    self.void_label = void_label
    self.save_path = save_path
    self.generator = generator
    self.epoch_length = epoch_length
    self.color_map = color_map
    self.classes = classes
    self.n_legend_rows = n_legend_rows
    self.tag = tag
def __init__(self, validation_data=(), interval=10):
    super(Callback, self).__init__()
    self.interval = interval
    self.X_val, self.y_val = validation_data
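This is the usual "evaluate every N epochs" constructor. A hypothetical wiring of it, assuming the enclosing class is named IntervalEvaluation (the page shows only the method, not the class):

# Hypothetical usage; IntervalEvaluation is an assumed class name for
# the constructor shown above.
ival = IntervalEvaluation(validation_data=(X_val, y_val), interval=10)
model.fit(X_train, y_train, epochs=50, batch_size=64, callbacks=[ival])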
def __init__(self, X_val, y_val):
    import numpy as np
    # initialize Callback from parent class
    super(Callback, self).__init__()
    self.X_val, self.y_val = np.array(X_val), np.argmax(y_val)
def __init__(self, X_val, y_val):
    import numpy as np
    # initialize Callback from parent class
    super(Callback, self).__init__()
    self.X_val, self.y_val = np.array(X_val), np.array(y_val)
def __init__(self, X_val, y_val):
    import numpy as np
    # initialize Callback from parent class
    super(Callback, self).__init__()
    self.X_val, self.y_val = np.array(X_val), np.argmax(y_val, axis=1)
def train(Xtrain, ytrain, Xtrain_norm, ytrain_norm, Xvalidate, yvalidate, space):
    import sys
    from keras.optimizers import RMSprop
    from keras.callbacks import Callback

    class CorrelationEarlyStopping(Callback):
        def __init__(self, monitor='validate', patience=0, delta=.001):
            """
            :param monitor: 'validate' or 'train'
            :param patience: how many epochs to wait
            :param delta: by how much the monitored value has to be greater
                than the last maximum
            """
            self.rvalues = {'train': [], 'validate': []}
            self.monitor = monitor  # validate, train
            self.patience = patience
            self.delta = delta
            self.wait = 0
            self.best = 0
            self.num_epochs = 0
            self.best_model = None

        def on_epoch_end(self, epoch, logs={}):
            r2 = get_metrics(self.model, x=Xtrain_norm, y=ytrain_norm)
            self.rvalues['train'].append(r2)
            r2 = get_metrics(self.model, x=Xvalidate, y=yvalidate)
            self.rvalues['validate'].append(r2)
            print('\n\tTrain r2: {}\n\tValidate r2: {}\n'.format(
                self.rvalues['train'][-1], self.rvalues['validate'][-1]))
            sys.stdout.flush()

            if self.rvalues[self.monitor][-1] - self.delta >= self.best:
                self.best = self.rvalues[self.monitor][-1]
                self.wait = 0
                self.num_epochs = epoch
                self.best_model = self.model
            else:
                if self.wait >= self.patience:
                    self.num_epochs = epoch - self.patience
                    self.model.stop_training = True
                else:
                    self.num_epochs = epoch
                self.wait += 1

    model = vgg_variant(space)
    lr = 10 ** (-space['learning_rate'])
    rmsprop = RMSprop(lr=lr, rho=0.9, epsilon=1e-08)
    model.compile(loss='mean_squared_error', optimizer=rmsprop)
    monitor = CorrelationEarlyStopping(monitor='validate', patience=6, delta=0.01)
    gen = data_generator(Xtrain, ytrain, batch_size=space['batch_size'], space=space,
                         weighted_sampling=space['weighted_sampling'],
                         augment=space['augment'],
                         sampling_factor=space['sampling_factor'],
                         sampling_intercept=space['sampling_intercept'])
    model.fit_generator(gen, space['samples_per_epoch'], 50, 1, [monitor], (Xvalidate, yvalidate))
    return monitor.best_model, monitor.rvalues
def keras(self):
    """
    Returns an object that implements the Keras Callback interface.

    This method initializes the Keras callback lazily to prevent any
    possible import issues from affecting users who don't use it, as
    well as prevent it from importing Keras/tensorflow and all of their
    accompanying baggage unnecessarily in the case that they happened
    to be installed, but the user is not using them.
    """
    cb = self._callbacks.get(KERAS)

    # Keras is not importable
    if cb is False:
        return None

    # If this is the first time, try and import Keras
    if not cb:
        # Check if Keras is installed and fallback gracefully
        try:
            from keras.callbacks import Callback as KerasCallback

            class _KerasCallback(KerasCallback):
                """_KerasCallback implements KerasCallback using an injected Experiment.

                # TODO: Decide if we want to handle the additional callbacks:
                # 1) on_epoch_begin
                # 2) on_batch_begin
                # 3) on_batch_end
                # 4) on_train_begin
                # 5) on_train_end
                """
                def __init__(self, exp):
                    super(_KerasCallback, self).__init__()
                    self._exp = exp

                def on_epoch_end(self, epoch, logs=None):
                    if not logs:
                        logs = {}
                    val_acc = logs.get("val_acc")
                    val_loss = logs.get("val_loss")
                    if val_acc is not None:
                        self._exp.metric("val_acc", val_acc)
                    if val_loss is not None:
                        self._exp.metric("val_loss", val_loss)

            cb = _KerasCallback(self._exp)
            self._callbacks[KERAS] = cb
            return cb
        except ImportError:
            # Mark Keras as unimportable for future calls
            self._callbacks[KERAS] = False
            return None

    return cb

# Version of Experiment with a different name for use internally;
# should not be used directly by consumers
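A sketch of how the returned callback would be consumed, assuming exp is an instance of the Experiment class that exposes this keras() method (the name exp is ours, not the project's):

# Hypothetical usage: fetch the lazily-built callback and pass it to
# Keras; keras() returns None when Keras is not importable.
keras_cb = exp.keras()
callbacks = [keras_cb] if keras_cb is not None else []
model.fit(x_train, y_train, validation_split=0.1, callbacks=callbacks)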
def __init__(self, model, log_dir,
             histogram_freq=0, image_freq=0, audio_freq=0, write_graph=False):
    super(Callback, self).__init__()
    if K._BACKEND != 'tensorflow':
        raise Exception('TensorBoardBatch callback only works '
                        'with the TensorFlow backend.')
    import tensorflow as tf
    self.tf = tf
    import keras.backend.tensorflow_backend as KTF
    self.KTF = KTF

    self.log_dir = log_dir
    self.histogram_freq = histogram_freq
    self.image_freq = image_freq
    self.audio_freq = audio_freq
    self.histograms = None
    self.images = None
    self.write_graph = write_graph
    self.iter = 0
    self.scalars = []
    self.images = []
    self.audios = []
    self.model = model
    self.sess = KTF.get_session()

    if self.histogram_freq != 0:
        layers = self.model.layers
        for layer in layers:
            if hasattr(layer, 'name'):
                layer_name = layer.name
            else:
                layer_name = layer

            if hasattr(layer, 'W'):
                name = '{}_W'.format(layer_name)
                tf.histogram_summary(name, layer.W, collections=['histograms'])
            if hasattr(layer, 'b'):
                name = '{}_b'.format(layer_name)
                tf.histogram_summary(name, layer.b, collections=['histograms'])
            if hasattr(layer, 'output'):
                name = '{}_out'.format(layer_name)
                tf.histogram_summary(name, layer.output, collections=['histograms'])

    if self.image_freq != 0:
        tf.image_summary('input', self.model.input, max_images=2, collections=['images'])
        tf.image_summary('output', self.model.output, max_images=2, collections=['images'])

    if self.audio_freq != 0:
        tf.audio_summary('input', self.model.input, max_outputs=1, collections=['audios'])
        tf.audio_summary('output', self.model.output, max_outputs=1, collections=['audios'])

    if self.write_graph:
        if self.tf.__version__ >= '0.8.0':
            self.writer = self.tf.train.SummaryWriter(self.log_dir, self.sess.graph)
        else:
            self.writer = self.tf.train.SummaryWriter(self.log_dir, self.sess.graph_def)
    else:
        self.writer = self.tf.train.SummaryWriter(self.log_dir)