The following 50 code examples, extracted from open-source Python projects, illustrate how to use keras.models.load_model().
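Before the project examples, here is a minimal, hypothetical sketch of the save/load round trip that the snippets below all build on. The toy architecture and the file name 'my_model.h5' are illustrative only and are not taken from any of the projects.

import numpy as np
from keras.models import Sequential, load_model
from keras.layers import Dense

# Build and save a small model; save() stores the architecture, weights and optimizer state.
model = Sequential([Dense(4, input_dim=3, activation='relu'),
                    Dense(1, activation='sigmoid')])
model.compile(optimizer='adam', loss='binary_crossentropy')
model.save('my_model.h5')

# Restore the model and check that it produces the same predictions.
restored = load_model('my_model.h5')
x = np.random.random((2, 3))
np.testing.assert_allclose(model.predict(x), restored.predict(x), atol=1e-5)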
def _loadTFGraph(self, sess, graph):
    """
    Loads the Keras model into memory, then uses the passed-in session to load the
    model's inference-related ops into the passed-in Tensorflow graph.

    :return: A tuple (graph, input_name, output_name) where graph is the TF graph
    corresponding to the Keras model's inference subgraph, input_name is the name of
    the Keras model's input tensor, and output_name is the name of the Keras model's
    output tensor.
    """
    keras_backend = K.backend()
    assert keras_backend == "tensorflow", \
        "Only tensorflow-backed Keras models are supported, tried to load Keras model " \
        "with backend %s." % (keras_backend)
    with graph.as_default():
        K.set_learning_phase(0)  # Inference phase
        model = load_model(self.getModelFile())
        out_op_name = tfx.op_name(model.output, graph)
        stripped_graph = tfx.strip_and_freeze_until([out_op_name], graph, sess,
                                                    return_graph=True)
        return stripped_graph, model.input.name, model.output.name

def generate_answer(img_path, question, model):
    model_path = 'weights/model_' + str(model) + '.h5'
    model_id = model  # keep the integer id; `model` is reused for the loaded network below
    model = load_model(model_path)
    img_features = extract_image_features(img_path)
    seq = preprocess_question(question)
    if model_id == 1:
        x = [img_features, seq]
    else:
        x = [img_features, seq, img_features]
    probabilities = model.predict(x)[0]
    answers = np.argsort(probabilities[:1000])
    top_answers = [prepare_data.top_answers[answers[-1]],
                   prepare_data.top_answers[answers[-2]],
                   prepare_data.top_answers[answers[-3]]]
    return top_answers

def test_save_load(self):
    answer = self.model.predict(self.data)
    model_name = self.__class__.__name__ + '_temp.model'
    self.model.save(model_name)
    self.model = load_model(
        model_name,
        custom_objects=self.custom_objects
    )
    os.remove(model_name)
    result = self.model.predict(self.data)
    self.assertEqual(
        result.shape,
        (self.data_size, self.max_length, self.encoding_size)
    )
    np.testing.assert_array_almost_equal(answer, result)

def test_save_load(self):
    answer = self.model.predict(self.data)
    model_name = self.__class__.__name__ + '_temp.model'
    self.model.save(model_name)
    self.model = load_model(
        model_name,
        custom_objects=self.custom_objects
    )
    os.remove(model_name)
    result = self.model.predict(self.data)
    self.assertEqual(
        result.shape,
        (self.data_size, self.encoding_size)
    )
    np.testing.assert_array_almost_equal(answer, result)

def init_segmenter(args_segmenter_model):
    global segmenter_model, rings, sectors, points_per_ring, is_ped, tf_segmenter_graph
    segmenter_model = load_model(args_segmenter_model, compile=False)
    segmenter_model._make_predict_function()  # https://github.com/fchollet/keras/issues/6124
    print("Loading segmenter model " + args_segmenter_model)
    segmenter_model.summary()
    points_per_ring = segmenter_model.get_input_shape_at(0)[0][1]
    match = re.search(r'lidarnet-(car|ped)-.*seg-rings_(\d+)_(\d+)-sectors_(\d+)-.*\.hdf5',
                      args_segmenter_model)
    is_ped = match.group(1) == 'ped'
    rings = range(int(match.group(2)), int(match.group(3)))
    sectors = int(match.group(4))
    points_per_ring *= sectors
    assert len(rings) == segmenter_model.get_input_shape_at(0)[0][2]
    print('Loaded segmenter model with ' + str(points_per_ring) + ' points per ring and ' +
          str(len(rings)) + ' rings from ' + str(rings[0]) + ' to ' + str(rings[-1]))
    if K._backend == 'tensorflow':
        tf_segmenter_graph = tf.get_default_graph()
        print(tf_segmenter_graph)
    return

def on_epoch_begin(self, epoch, logs={}):
    if epoch > 0 and epoch % self.eval_frequency == 0:
        # Unhappy hack to work around h5py not being able to write to GCS.
        # Force snapshots and saves to local filesystem, then copy them over to GCS.
        model_path_glob = 'checkpoint.*'
        if not self.job_dir.startswith("gs://"):
            model_path_glob = os.path.join(self.job_dir, model_path_glob)
        checkpoints = glob.glob(model_path_glob)
        if len(checkpoints) > 0:
            checkpoints.sort()
            census_model = load_model(checkpoints[-1])
            census_model = model.compile_model(census_model, self.learning_rate)
            loss, acc = census_model.evaluate_generator(
                model.generator_input(self.eval_files, chunk_size=CHUNK_SIZE),
                steps=self.steps)
            print '\nEvaluation epoch[{}] metrics[{:.2f}, {:.2f}] {}'.format(
                epoch, loss, acc, census_model.metrics_names)
            if self.job_dir.startswith("gs://"):
                copy_file_to_gcs(self.job_dir, checkpoints[-1])
        else:
            print '\nEvaluation epoch[{}] (no checkpoints found)'.format(epoch)

def test_sequential_model_saving_2():
    # test with custom optimizer, loss
    custom_opt = optimizers.rmsprop
    custom_loss = objectives.mse

    model = Sequential()
    model.add(Dense(2, input_dim=3))
    model.add(Dense(3))
    model.compile(loss=custom_loss, optimizer=custom_opt(), metrics=['acc'])

    x = np.random.random((1, 3))
    y = np.random.random((1, 3))
    model.train_on_batch(x, y)

    out = model.predict(x)
    _, fname = tempfile.mkstemp('.h5')
    save_model(model, fname)

    model = load_model(fname,
                       custom_objects={'custom_opt': custom_opt,
                                       'custom_loss': custom_loss})
    os.remove(fname)

    out2 = model.predict(x)
    assert_allclose(out, out2, atol=1e-05)

def test_fuctional_model_saving():
    input = Input(shape=(3,))
    x = Dense(2)(input)
    output = Dense(3)(x)

    model = Model(input, output)
    model.compile(loss=objectives.MSE,
                  optimizer=optimizers.RMSprop(lr=0.0001),
                  metrics=[metrics.categorical_accuracy])
    x = np.random.random((1, 3))
    y = np.random.random((1, 3))
    model.train_on_batch(x, y)

    out = model.predict(x)
    _, fname = tempfile.mkstemp('.h5')
    save_model(model, fname)

    model = load_model(fname)
    os.remove(fname)

    out2 = model.predict(x)
    assert_allclose(out, out2, atol=1e-05)

def backupNetwork(self, model, backup):
    weightMatrix = []
    for layer in model.layers:
        weights = layer.get_weights()
        weightMatrix.append(weights)
    # np.save('weightMatrix.npy', weightMatrix)
    # print(weightMatrix.shape)
    i = 0
    for layer in backup.layers:
        weights = weightMatrix[i]
        layer.set_weights(weights)
        i += 1

# def loadWeights(self, path):
#     self.model.set_weights(load_model(path).get_weights())

def ensemble():
    for fname in ['bestval{}.h5'.format(i) for i in xrange(4, 5)]:
        model = load_model(fname)
        fnames = [VAL_DIR + 'cats/' + im for im in listdir(VAL_DIR + 'cats/')]
        fnames += [VAL_DIR + 'dogs/' + im for im in listdir(VAL_DIR + 'dogs/')]
        gen = prep_data(fnames, model.layers[0].input_shape[1],
                        model.layers[0].input_shape[2])
        dog_probabs = []
        i = 0
        for mini_batch in gen:
            y = dog_probab(model.predict(mini_batch))
            dog_probabs.extend(y)
            i += mini_batch_sz
            if i % 100 == 0:
                print "Finished {} of {}".format(i, len(fnames))
        # out = model.predict_generator(gen, val_samples=len(fnames))
        pickle.dump(dog_probabs, open(ROOT + '/predictions/' + fname, 'w'))
        print 'Done with ' + fname

def activate(self, *args, **kwargs):
    np.random.seed(1337)  # for reproducibility

    st = datetime.now()
    self._classifierModel = load_model(self.savedModelPath)
    logger.info("{} {}".format(datetime.now() - st, "loaded _classifierModel"))

    st = datetime.now()
    self._tokenizer = self.get_tokenizer()
    logger.info("{} {}".format(datetime.now() - st, "loaded _tokenizer"))

    #st = datetime.now()
    #nltk.download()
    #self._tokenizer_nltk = nltk.data.load('tokenizers/punkt/english.pickle')
    #logger.info("{} {}".format(datetime.now() - st, "loaded _tokenizer_nltk"))

    logger.info("SuggestionMiningDL plugin is ready to go!")

def load_model_hdf(model_path, encoding_json=None, need_encoding=True):
    """Load a model from a .hdf file. If label encodings are not present,
    try to load them from encoding_json."""
    # try to get structure + encoding from hdf, else use the
    # one provided (or raise an exception if it is not provided).
    m = load_model(model_path, custom_objects={'qscore': qscore})
    encoding = None
    with h5py.File(model_path, 'r') as h5:
        if _encod_path_ in h5:
            encoding = [s.decode() for s in h5[_encod_path_][()]]
            logging.info("Loaded encoding from {}.".format(model_path))
    if encoding is None and encoding_json is not None:
        encoding = load_encoding(encoding_json)
        logging.info("Loaded encoding from {}.".format(encoding_json))
    if encoding is None and need_encoding:
        raise KeyError("Could not find label encodings in the model, "
                       "please provide an encoding json")
    return m, encoding

def generate_main(args):
    """
    generates text from trained model specified in args.
    main method for generate subcommand.
    """
    # load learning model for config and weights
    model = load_model(args.checkpoint_path)
    # build inference model and transfer weights
    inference_model = build_inference_model(model)
    inference_model.set_weights(model.get_weights())
    logger.info("model loaded: %s.", args.checkpoint_path)

    # create seed if not specified
    if args.seed is None:
        with open(args.text_path) as f:
            text = f.read()
        seed = generate_seed(text)
        logger.info("seed sequence generated from %s.", args.text_path)
    else:
        seed = args.seed

    return generate_text(inference_model, seed, args.length, args.top_n)

def get_best_model(cnn, num_fold, model_type, submission_version):
    from keras.models import load_model
    final_model_path = MODELS_PATH + '{}_subm_type_{}_fold_{}.h5'.format(
        cnn, submission_version, num_fold)
    if model_type == 1:
        # Use with best score
        files = glob.glob(MODELS_PATH + '{}_fold_{}_score_*.h5'.format(cnn, num_fold + 1))
        best_score = 1000000000
        best_model_path = final_model_path
        for f in files:
            scr = float(f.split("_score_")[1].split(".")[0])
            if scr < best_score:
                best_score = scr
                best_model_path = f
    else:
        # Use only latest
        best_model_path = MODELS_PATH + '{}_subm_type_{}_fold_{}.h5'.format(
            cnn, submission_version, num_fold)
    if "DENSENET" not in cnn:
        model = load_model(best_model_path)
    else:
        model = get_pretrained_model(cnn, CLASSES_NUMBER)
        model.load_weights(best_model_path)
    return model, best_model_path

def execute(symbol, begin, end, days, plot, model_path, random):
    print model_path
    model = load_model(model_path)
    env = gym.make('trading-v0').env
    env.initialise(symbol=symbol, start=begin, end=end, days=days, random=random)
    state_size = env.observation_space.shape[0]
    state = env.reset()
    done = False
    while not done:
        state = state.reshape(1, state_size)
        # state = state.reshape(1, 1, state_size)
        qval = model.predict(state, batch_size=1)
        action = (np.argmax(qval))
        state, _, done, info = env.step(action)
        # log.info("%s,%s,%s,%s", state, _, done, info)
        # log.info("\n%s", env.sim.to_df())
        if plot:
            env.render()

def get_most_recent_classifier(which, parent_dir="WikiLearn/data/models/classifier", spec=None):
    my_dir = os.path.join(parent_dir, which)
    classifiers = os.listdir(my_dir)
    most_recent = None
    for c in classifiers:
        try:
            if most_recent == None or int(c) > int(most_recent):
                most_recent = c
        except:
            continue
    classifier_dir = os.path.join(my_dir, most_recent)
    if spec != None:
        classifier_f = os.path.join(classifier_dir, "lstm-classifier-%s.h5" % spec)
    else:
        classifier_f = os.path.join(classifier_dir, "lstm-classifier.h5")
    from keras.models import load_model
    classifier = load_model(classifier_f)
    return classifier

# use input classifier and encoder to classify all documents in text.tsv

def __init__(self, dl_state, custom_objects=None,
             layer_name=None, layer_index=None):
    '''DL representations for images
    Args:
        layer_name ([list]): names for the layers to extract.
        layer_index ([list]): indices for the layers to extract.
            index=-2 corresponds to the last hidden layer.
            index=-4 corresponds to the last conv layer (before global averaging).
    '''
    if layer_name is None and layer_index is None:
        raise Exception("One of [layer_name, layer_index] must be specified")
    dl_model = load_model(dl_state, custom_objects=custom_objects)
    if layer_index is not None:
        output_list = [dl_model.get_layer(index=idx).output for idx in layer_index]
    else:
        output_list = [dl_model.get_layer(name=nm).output for nm in layer_name]
    self.repr_model = Model(input=dl_model.input, output=output_list)

def test_saving_lambda_custom_objects():
    input = Input(shape=(3,))
    x = Lambda(lambda x: square_fn(x), output_shape=(3,))(input)
    output = Dense(3)(x)

    model = Model(input, output)
    model.compile(loss=objectives.MSE,
                  optimizer=optimizers.RMSprop(lr=0.0001),
                  metrics=[metrics.categorical_accuracy])
    x = np.random.random((1, 3))
    y = np.random.random((1, 3))
    model.train_on_batch(x, y)

    out = model.predict(x)
    _, fname = tempfile.mkstemp('.h5')
    save_model(model, fname)

    model = load_model(fname, custom_objects={'square_fn': square_fn})
    os.remove(fname)

    out2 = model.predict(x)
    assert_allclose(out, out2, atol=1e-05)

def pred():
    tag_index = pickle.loads(open('tag_index.pkl', 'rb').read())
    index_tag = {index: tag for tag, index in tag_index.items()}
    name_img150 = []
    for name in filter(lambda x: '.jpg' in x, sys.argv):
        img = Image.open('{name}'.format(name=name))
        img = img.convert('RGB')
        img150 = np.array(img.resize((150, 150)))
        name_img150.append((name, img150))

    model = load_model(sorted(glob.glob('models/*.model'))[-1])
    for name, img150 in name_img150:
        result = model.predict(np.array([img150]))
        result = result.tolist()[0]
        result = {i: w for i, w in enumerate(result)}
        for i, w in sorted(result.items(), key=lambda x: x[1] * -1)[:30]:
            print("{name} tag={tag} prob={prob}".format(name=name, tag=index_tag[i], prob=w))

def load_model(data):
    '''
    Load keras model.
    '''
    model = Sequential()
    model.add(Dense(data.shape[1], activation='relu', input_dim=data.shape[1],
                    kernel_constraint=maxnorm(3)))
    model.add(Dropout(0.25))
    #model.add(Dense(16, activation='relu', kernel_constraint=maxnorm(3)))
    #model.add(Dropout(0.25))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(optimizer='RMSprop', loss='binary_crossentropy',
                  metrics=['accuracy'])
    return model

def __init__(self, f_id, model_name='', hidden_layers=9, activation='selu'):
    self.f_id = f_id
    self.fit_history = None
    self.model_dir = os.path.join(MODEL_FOLDER, str(f_id), 'model')
    os.makedirs(self.model_dir, exist_ok=True)

    # init sequential model
    if model_name != '':
        self.model = load_model(os.path.join(self.model_dir, str(model_name)))
    else:
        self.model = Sequential()
        self.model.add(Dense(64, input_dim=2, activation=activation,
                             kernel_initializer='random_normal'))
        for i in range(hidden_layers):
            self.model.add(Dense(64, activation=activation,
                                 kernel_initializer='random_normal'))
            self.model.add(AlphaDropout(0.2))
        self.model.add(Dense(1, activation=activation,
                             kernel_initializer='random_normal'))
        self.model.compile(loss='mse', optimizer='nadam')

def unet_fit(name, start_t, end_t, start_v, end_v, check_name=None):

    t = time.time()
    callbacks = [EarlyStopping(monitor='val_loss', patience=15, verbose=1),
                 ModelCheckpoint('/home/w/DS_Projects/Kaggle/DS Bowl 2017/Scripts/LUNA/CNN/Checkpoints/{}.h5'.format(name),
                                 monitor='val_loss', verbose=0, save_best_only=True)]

    if check_name is not None:
        check_model = '/home/w/DS_Projects/Kaggle/DS Bowl 2017/Scripts/LUNA/CNN/Checkpoints/{}.h5'.format(check_name)
        model = load_model(check_model,
                           custom_objects={'dice_coef_loss': dice_coef_loss, 'dice_coef': dice_coef})
    else:
        model = unet_model()

    model.fit_generator(generate_train(start_t, end_t), nb_epoch=150, verbose=1,
                        validation_data=generate_val(start_v, end_v),
                        callbacks=callbacks,
                        samples_per_epoch=551, nb_val_samples=50)
    return

# In[5]:

def cnn3d_genfit(name, nn_model, epochs, start_t, end_t, start_v, end_v,
                 nb_train, nb_val, check_name=None):

    callbacks = [EarlyStopping(monitor='val_loss', patience=15, verbose=1),
                 ModelCheckpoint('/home/w/DS_Projects/Kaggle/DS Bowl 2017/Scripts/LUNA/CNN/Checkpoints/{}.h5'.format(name),
                                 monitor='val_loss', verbose=0, save_best_only=True)]

    if check_name is not None:
        check_model = '/home/w/DS_Projects/Kaggle/DS Bowl 2017/Scripts/LUNA/CNN/Checkpoints/{}.h5'.format(check_name)
        model = load_model(check_model)
    else:
        model = nn_model

    model.fit_generator(generate_train(start_t, end_t), nb_epoch=epochs, verbose=1,
                        validation_data=generate_val(start_v, end_v),
                        callbacks=callbacks,
                        samples_per_epoch=nb_train, nb_val_samples=nb_val)
    return

def load_model(self, dictfn):
    print("Loading models...")

    # Set model filenames
    autoencoder_filename = dictfn["autoencoder_filename"]
    encoder_filename = dictfn["encoder_filename"]
    decoder_filename = dictfn["decoder_filename"]

    # Load autoencoder architecture + weights + shapes
    self.autoencoder = load_model(autoencoder_filename)
    self.autoencoder_input_shape = self.autoencoder.input_shape  # set input shape from loaded model
    self.autoencoder_output_shape = self.autoencoder.output_shape  # set output shape from loaded model

    # Load encoder architecture + weights + shapes
    self.encoder = load_model(encoder_filename)
    self.encoder_input_shape = self.encoder.input_shape  # set input shape from loaded model
    self.encoder_output_shape = self.encoder.output_shape  # set output shape from loaded model

    # Load decoder architecture + weights + shapes
    self.decoder = load_model(decoder_filename)
    self.decoder_input_shape = self.decoder.input_shape  # set input shape from loaded model
    self.decoder_output_shape = self.decoder.output_shape  # set output shape from loaded model

def load(self, f_model, f_target_model, f_states, f_next_states, f_actions,
         f_rewards, f_config):
    self.model = load_model(f_model)
    self.model.compile(loss='mse', optimizer='rmsprop')
    self.target_model = load_model(f_target_model)
    self.target_model.compile(loss='mse', optimizer='rmsprop')
    d = pkl.load(open(f_config, 'r'))
    self.gamma = d['gamma']
    self.update_freq = d['update_freq']
    self.batch_size = d['batch_size']
    self.time = d['time']
    self.episode = d['episode']
    #self.frame = d['frame']
    self.frame_count = d['frame_count']
    self.action_count = d['action_count']
    self.Qfunction.model = self.model
    self.target_Qfunction.model = self.target_model
    self.replay_memory.load(f_states, f_next_states, f_actions, f_rewards)
    self.policy.model = self.model

def model(self, newmodel=False):
    if os.path.isfile('modules/irwin/models/playerBinary.h5') and not newmodel:
        print("model already exists, opening from file")
        return load_model('modules/irwin/models/playerBinary.h5')
    print('model does not exist, building from scratch')
    model = Sequential([
        Dense(32, input_shape=(5,)),
        Activation('relu'),
        Dense(16),
        Activation('sigmoid'),
        Dense(8),
        Activation('softmax'),
        Dense(1),
        Activation('sigmoid')
    ])
    model.compile(optimizer=Adam(),
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    return model

def loadModel(self, filename):
    #import h5py
    #f = h5py.File(filename, 'r+')
    #del f['optimizer_weights']
    from keras.models import load_model
    self.keras_model = load_model(filename, custom_objects=global_loss_list)
    self.optimizer = self.keras_model.optimizer
    self.compiled = True

def plot_(model_path, file_path):
    """Visualize a model

    Parameters
    ----------
    model_path : str
        Path to the model.h5
    file_path : str
        Destination file to save i.e. model.png
    """
    model = load_model(model_path)
    plot_model(model, file_path, show_shapes=True, show_layer_names=False)

def load(self, model_filename):
    self.__model = load_model("%s.model" % model_filename)
    self.__chars = np.load("%s.cvocab.npy" % model_filename).tolist()
    self.__trigrams = np.load("%s.tvocab.npy" % model_filename).tolist()
    self.__classes = np.load("%s.classes.npy" % model_filename).tolist()
    self.__char_indexes = dict((c, i) for i, c in enumerate(self.__chars))
    self.__indexes_char = dict((i, c) for i, c in enumerate(self.__chars))
    self.__trigrams_indexes = dict((t, i) for i, t in enumerate(self.__trigrams))
    self.__indices_trigrams = dict((i, t) for i, t in enumerate(self.__trigrams))
    self.__classes_indexes = dict((c, i) for i, c in enumerate(self.__classes))
    self.__indexes_classes = dict((i, c) for i, c in enumerate(self.__classes))

def load_model(self, epoch=None):
    '''
    Loads a saved model. If epoch id is provided, will load the corresponding
    model. Or else, will load the best model.
    '''
    if not epoch:
        self.model = load_model("%s.model" % self.model_name_prefix,
                                custom_objects=self.custom_objects)
    else:
        self.model = load_model("%s_%d.model" % (self.model_name_prefix, epoch),
                                custom_objects=self.custom_objects)
    self.model.summary()
    self.data_processor = pickle.load(open("%s.dataproc" % self.model_name_prefix, "rb"))

def load_model(self, epoch=None):
    '''
    Loads a saved model. If epoch id is provided, will load the corresponding
    model. Or else, will load the best model.
    '''
    if not epoch:
        self.model = load_model("%s.model" % self.model_name_prefix,
                                custom_objects=self.custom_objects)
    else:
        self.model = load_model("%s_%d.model" % (self.model_name_prefix, epoch),
                                custom_objects=self.custom_objects)
    self.data_processor = pickle.load(open("%s.dataproc" % self.model_name_prefix, "rb"))
    self.label_map = pickle.load(open("%s.labelmap" % self.model_name_prefix, "rb"))

def load_keras_model(config, custom_objects=None, fold=0):
    m_file = os.path.join(
        config.BASE_DIR,
        '{}_trial_{}_db_{}_k_{}_model.h5'.format(
            config.INFO, config.TRIAL, DirConfig.DEBUG, fold))
    if os.path.isfile(m_file):
        model = load_model(m_file, custom_objects)
        return model
    else:
        return None

def load_model(model_data_path):
    from keras.models import load_model
    return load_model(model_data_path)

def load_vae_model(model_file):
    return load_keras_model(model_file, custom_objects={"KCompetitive": KCompetitive})

def load_ae_model(model_file):
    return load_keras_model(model_file, custom_objects={"KCompetitive": KCompetitive})

def init_localizer(args_localizer_model):
    global localizer_model, pointnet_points, tf_localizer_graph
    print("Loading localizer model " + args_localizer_model)
    localizer_model = load_model(args_localizer_model, compile=False)
    localizer_model._make_predict_function()  # https://github.com/fchollet/keras/issues/6124
    localizer_model.summary()
    # TODO: check consistency against segmenter model (rings)
    pointnet_points = localizer_model.get_input_shape_at(0)[0][1]
    print('Loaded localizer model with ' + str(pointnet_points) + ' points')
    if K._backend == 'tensorflow':
        tf_localizer_graph = tf.get_default_graph()
        print(tf_localizer_graph)
    return

def insert_deep_learning_model(pipeline_step, file_name):
    # This is where we saved the random_name for this model
    random_name = pipeline_step.model

    # Load the Keras model here
    keras_file_name = file_name[:-5] + random_name + '_keras_deep_learning_model.h5'
    model = keras_load_model(keras_file_name)

    # Put the model back in place so that we can still use it to get predictions
    # without having to load it back in from disk
    return model

def load(self, file_path=FILE_PATH):
    print('Model Loaded.')
    self.model = load_model(file_path)

def load_model(self, path):
    self.model = load_model(path)
    return self

def save(self, filepath, overwrite=True):
    '''Save into a single HDF5 file:
        - The model architecture, allowing to re-instantiate the model.
        - The model weights.
        - The state of the optimizer, allowing to resume training
          exactly where you left off.

    This allows you to save the entirety of the state of a model
    in a single file.

    Saved models can be reinstantiated via `keras.models.load_model`.
    The model returned by `load_model` is a compiled model ready to be used
    (unless the saved model was never compiled in the first place).

    # Example usage

    ```python
    from keras.models import load_model

    model.save('my_model.h5')  # creates a HDF5 file 'my_model.h5'
    del model  # deletes the existing model

    # returns a compiled model
    # identical to the previous one
    model = load_model('my_model.h5')
    ```
    '''
    from ..models import save_model
    save_model(self, filepath, overwrite)

def test_sequential_model_saving():
    model = Sequential()
    model.add(Dense(2, input_dim=3))
    model.add(RepeatVector(3))
    model.add(TimeDistributed(Dense(3)))
    model.compile(loss=objectives.MSE,
                  optimizer=optimizers.RMSprop(lr=0.0001),
                  metrics=[metrics.categorical_accuracy],
                  sample_weight_mode='temporal')
    x = np.random.random((1, 3))
    y = np.random.random((1, 3, 3))
    model.train_on_batch(x, y)

    out = model.predict(x)
    _, fname = tempfile.mkstemp('.h5')
    save_model(model, fname)

    new_model = load_model(fname)
    os.remove(fname)

    out2 = new_model.predict(x)
    assert_allclose(out, out2, atol=1e-05)

    # test that new updates are the same with both models
    x = np.random.random((1, 3))
    y = np.random.random((1, 3, 3))
    model.train_on_batch(x, y)
    new_model.train_on_batch(x, y)
    out = model.predict(x)
    out2 = new_model.predict(x)
    assert_allclose(out, out2, atol=1e-05)

def test_saving_without_compilation():
    model = Sequential()
    model.add(Dense(2, input_dim=3))
    model.add(Dense(3))
    model.compile(loss='mse', optimizer='sgd', metrics=['acc'])

    _, fname = tempfile.mkstemp('.h5')
    save_model(model, fname)
    model = load_model(fname)
    os.remove(fname)

def check_model(path=MODEL_PATH, file=SAMPLE_CSV_FILE, nsamples=2):
    '''
    see predictions generated for the training dataset
    '''
    # load model
    model = load_model(path)
    # load data
    data, dic = get_data(file)
    rows, questions, true_answers = encode_data(data, dic)
    # visualize model graph
    # plot_model(model, to_file='tableqa_model.png')
    # predict answers
    prediction = model.predict([rows[:nsamples], questions[:nsamples]])
    print prediction
    predicted_answers = [[np.argmax(character) for character in sample] for sample in prediction]
    print predicted_answers
    print true_answers[:nsamples]
    # one hot encode answers
    # true_answers = [to_categorical(answer, num_classes=len(dic)) for answer in answers[:nsamples]]
    # decode chars from char ids int
    inv_dic = {v: k for k, v in dic.iteritems()}
    for i in xrange(nsamples):
        print '\n'
        # print 'Predicted answer: ' + ''.join([dic[char] for char in sample])
        print 'Table: ' + ''.join([inv_dic[char_id] for char_id in rows[i] if char_id != 0])
        print 'Question: ' + ''.join([inv_dic[char_id] for char_id in questions[i] if char_id != 0])
        print 'Answer(correct): ' + ''.join([inv_dic[char_id] for char_id in true_answers[i] if char_id != 0])
        print 'Answer(predicted): ' + ''.join([inv_dic[char_id] for char_id in predicted_answers[i] if char_id != 0])

def __init__(self, weights=None):
    """Either load pretrained from imagenet, or load our saved
    weights from our own training."""

    self.weights = weights  # so we can check elsewhere which model

    if weights is None:
        # Get model with pretrained weights.
        base_model = InceptionV3(
            weights='imagenet',
            include_top=True
        )

        # We'll extract features at the final pool layer.
        self.model = Model(
            inputs=base_model.input,
            outputs=base_model.get_layer('avg_pool').output
        )
    else:
        # Load the model first.
        self.model = load_model(weights)

        # Then remove the top so we get features not predictions.
        # From: https://github.com/fchollet/keras/issues/2371
        self.model.layers.pop()
        self.model.layers.pop()  # two pops to get to pool layer
        self.model.outputs = [self.model.layers[-1].output]
        self.model.output_layers = [self.model.layers[-1]]
        self.model.layers[-1].outbound_nodes = []