The following 50 code examples, extracted from open-source Python projects, illustrate how to use keras.models.model_from_json().
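Before the individual examples, here is a minimal, self-contained sketch of the round-trip they all build on: to_json() serializes only the architecture, so the weights and the compile step must be saved and restored separately. The file names and the toy architecture are illustrative:

from keras.models import Sequential, model_from_json
from keras.layers import Dense

# Build and save a model: architecture as JSON, weights as HDF5.
model = Sequential([Dense(10, activation='softmax', input_dim=784)])
model.compile(loss='categorical_crossentropy', optimizer='adam')
with open('model.json', 'w') as f:
    f.write(model.to_json())
model.save_weights('model.h5')

# Reload it: model_from_json() restores the architecture only;
# weights and compilation are restored in separate steps.
with open('model.json') as f:
    loaded_model = model_from_json(f.read())
loaded_model.load_weights('model.h5')
loaded_model.compile(loss='categorical_crossentropy', optimizer='adam')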
def init():
    json_file = open('model.json', 'r')
    loaded_model_json = json_file.read()
    json_file.close()
    loaded_model = model_from_json(loaded_model_json)
    # load weights into new model
    loaded_model.load_weights("model.h5")
    print("Loaded Model from disk")
    # compile and evaluate loaded model
    loaded_model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    # loss, accuracy = model.evaluate(X_test, y_test)
    # print('loss:', loss)
    # print('accuracy:', accuracy)
    graph = tf.get_default_graph()
    return loaded_model, graph
def __init__(self, nb_classes, resnet_layers, input_shape, weights):
    """Instantiate a PSPNet."""
    self.input_shape = input_shape
    json_path = join("weights", "keras", weights + ".json")
    h5_path = join("weights", "keras", weights + ".h5")
    if isfile(json_path) and isfile(h5_path):
        print("Keras model & weights found, loading...")
        with open(json_path, 'r') as file_handle:
            self.model = model_from_json(file_handle.read())
        self.model.load_weights(h5_path)
    else:
        print("No Keras model & weights found, import from npy weights.")
        self.model = layers.build_pspnet(nb_classes=nb_classes,
                                         resnet_layers=resnet_layers,
                                         input_shape=self.input_shape)
        self.set_npy_weights(weights)
def load_model(self):
    self.model = model_from_json(open(self.model_filename).read())
    self.model.load_weights(self.weights_filename)
    with open(self.categories_filename, 'rb') as infile:
        self.categories = pickle.load(infile)
    self.inv_categories = {v: k for k, v in self.categories.items()}
    if self.verbose:
        print('compiling model ... ')
        start_time = time.time()
    self.model.compile(optimizer=self.optimizer,
                       loss='categorical_crossentropy',
                       metrics=['accuracy'])
    if self.verbose:
        end_time = time.time()
        self.print_time(start_time, end_time, 'compiling model')
def load_model_from_checkpoint(model_dir):
    '''Loads the best performing model from model_dir'''
    with open(os.path.join(model_dir, 'model.json'), 'r') as f:
        model = model_from_json(f.read())
    epoch = 0
    newest_checkpoint = max(glob.iglob(model_dir + '/checkpoints/*.hdf5'),
                            key=os.path.getctime)
    if newest_checkpoint:
        epoch = int(newest_checkpoint[-22:-19])
        model.load_weights(newest_checkpoint)
    return model, epoch
def main():
    # Get model:
    model_file = open('Data/Model/model.json', 'r')
    model = model_file.read()
    model_file.close()
    model = model_from_json(model)
    model.load_weights("Data/Model/weights.h5")

    # Get camera:
    cap = cv2.VideoCapture(0)

    # Open game in browser:
    open_game(browser='chrome', url='http://apps.thecodepost.org/trex/trex.html')

    while 1:
        # Get image from camera:
        ret, img = cap.read()
        Y = predict(model, img)
        if Y == 0:
            release()
        elif Y == 1:
            press()
    cap.release()
def load_model(name):
    """
    Loads a Keras model from disk. The model should be contained in two
    files: a .json with a description of the architecture and a .h5 with
    the model weights. See save_model() to save the model.

    Reference: http://keras.io/faq/#how-can-i-save-a-keras-model

    Parameters:
    -----------
    name: Name of the model contained in the file names:
        <name>_architecture.json
        <name>_weights.h5

    Returns:
    --------
    model: Keras model object.
    """
    from keras.models import model_from_json

    # Uses 'with' to ensure that the file is closed properly
    with open(name + '_architecture.json') as f:
        model = model_from_json(f.read())
    model.load_weights(name + '_weights.h5')
    return model
def get_model(self, filename=None):
    """Given a filename, load that model file; otherwise, generate a new model."""
    model = None
    if filename:
        info('attempting to load model {}'.format(filename))
        try:
            model = model_from_json(open(filename).read())
        except FileNotFoundError:
            print('could not load file {}'.format(filename))
            quit()
        print('loaded model file {}'.format(filename))
    else:
        print('no model file loaded, generating new model.')
        size = self.reversi.size ** 2
        model = Sequential()
        model.add(Dense(HIDDEN_SIZE, activation='relu', input_dim=size))
        # model.add(Dense(HIDDEN_SIZE, activation='relu'))
        model.add(Dense(size))
    model.compile(loss='mse', optimizer=optimizer)
    return model
def player(cls, config, env_factory, model_json, ctrl_queue, out_queue, done_rewards_queue):
    os.environ['CUDA_VISIBLE_DEVICES'] = ''
    with tf.device("/cpu:0"):
        model = model_from_json(model_json)
        players = [Player(env_factory(), config.a3c_steps, config.a3c_gamma,
                          config.max_steps, idx)
                   for idx in range(config.swarm_size)]
        # input_t, conv_out_t = atari.net_input(players[0].env)
        # n_actions = players[0].env.action_space.n
        # model = make_run_model(input_t, conv_out_t, n_actions)
        while True:
            # check ctrl queue for new model
            if not ctrl_queue.empty():
                weights = ctrl_queue.get()
                # stop requested
                if weights is None:
                    break
                model.set_weights(weights)
            for sample in Player.step_players(model, players):
                out_queue.put(sample)
            for rw in Player.gather_done_rewards(*players):
                done_rewards_queue.put(rw)
def __init__(self):
    self.dataclose = self.datas[0].close
    self.order = None
    self.order_dict = {}
    # self.signal_add(bt.SIGNAL_LONG, bt.ind.CrossOver(sma1, sma2))

    # load json and create model
    json_file = open('model.json', 'r')
    loaded_model_json = json_file.read()
    json_file.close()
    self.model = model_from_json(loaded_model_json)
    # load weights into new model
    self.model.load_weights("model.h5")
    print("Loaded model from disk")
    # evaluate loaded model on test data
    self.model.compile(loss='mean_squared_error', optimizer='adam')
def __init__(self):
    self.dataclose = self.datas[0].close
    self.order = None
    self.orefs = list()
    # self.signal_add(bt.SIGNAL_LONG, bt.ind.CrossOver(sma1, sma2))

    # load json and create model
    json_file = open('model.json', 'r')
    loaded_model_json = json_file.read()
    json_file.close()
    self.model = model_from_json(loaded_model_json)
    # load weights into new model
    self.model.load_weights("model.h5")
    print("Loaded model from disk")
    # evaluate loaded model on test data
    self.model.compile(loss='mean_squared_error', optimizer='adam')
def __init__(self):
    self.dataclose = self.datas[0].close
    self.order = None
    # self.signal_add(bt.SIGNAL_LONG, bt.ind.CrossOver(sma1, sma2))

    # load json and create model
    json_file = open('model.json', 'r')
    loaded_model_json = json_file.read()
    json_file.close()
    self.model = model_from_json(loaded_model_json)
    # load weights into new model
    self.model.load_weights("model.h5")
    print("Loaded model from disk")
    # evaluate loaded model on test data
    self.model.compile(loss='mean_squared_error', optimizer='adam')
def load_keras_model(weights, yaml=None, json=None,
                     normalise_conv_for_one_hot_encoded_input=False,
                     axis_of_normalisation=None,
                     name_of_conv_layer_to_normalise=None):
    if (normalise_conv_for_one_hot_encoded_input):
        assert axis_of_normalisation is not None,\
            "specify axis of normalisation for normalising one-hot encoded input"
    assert yaml is not None or json is not None,\
        "either yaml or json must be specified"
    assert yaml is None or json is None,\
        "only one of yaml or json must be specified"
    if (yaml is not None):
        from keras.models import model_from_yaml
        model = model_from_yaml(open(yaml).read())
    else:
        from keras.models import model_from_json
        model = model_from_json(open(json).read())
    model.load_weights(weights)
    if (normalise_conv_for_one_hot_encoded_input):
        mean_normalise_first_conv_layer_weights(
            model,
            axis_of_normalisation=axis_of_normalisation,
            name_of_conv_layer_to_normalise=name_of_conv_layer_to_normalise)
    return model
def predict_by_one(cube):
    # load json and create model
    json_file = open('model.json', 'r')
    loaded_model_json = json_file.read()
    json_file.close()
    loaded_model = model_from_json(loaded_model_json)
    # load weights into new model
    loaded_model.load_weights("model.hdf5")
    print("Loaded model from disk")
    sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    loaded_model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
    x = cube.reshape(-1, 1, 6, 20, 20)
    print(x.shape)
    result = loaded_model.predict(x, batch_size=10, verbose=0)
    # print(result.shape)
    # show result
    for i in result:
        print(i[0], i[1])
    return result
def main(): with open("text_model.json", 'r') as content_file: json_string = content_file.read() model = model_from_json(json_string) model.load_weights('on-texts-weights-improvement-09-0.96.hdf5') while True: phrase = input("Russian phrase to accentuate: ") words = parse_the_phrase(phrase) accented_phrase = [] pluswords = add_endings(words) for w in pluswords: if not bool(re.search(REG, w)): accented_phrase.append(w) else: accented_phrase.append(predict(model, w)) accented_phrase = ' '.join(accented_phrase) print(accented_phrase)
def for_bot(phrase):
    # with open("text_model.json", 'r') as content_file:
    #     json_string = content_file.read()
    # model = model_from_json(json_string)
    # model.load_weights('on-texts-weights-improvement-09-0.96.hdf5')
    words = parse_the_phrase(phrase)
    accented_phrase = []
    pluswords = add_endings(words)
    for w in pluswords:
        if w == "_":
            continue
        elif not bool(re.search(REG, w)):
            accented_phrase.append(w)
        else:
            accented_phrase.append(predict(model, w))
    accented_phrase = ' '.join(accented_phrase)
    return accented_phrase
def test():
    with open(model_file, 'r') as jfile:
        model = model_from_json(jfile.read(), {'HighwayUnit': HighwayUnit()})
    model.compile(optimizer='adam', loss='sparse_categorical_crossentropy')
    # weights_file = model_file.replace('json', 'h5')
    # model.load_weights(weights_file)
    model.load_weights(weight_file)

    # load data
    images, image_id, species = load_test_data()

    # load training mean and standard dev
    train_mean = np.load('train_mean.npy')
    train_std = np.load('train_std.npy')
    images = (images - train_mean) / train_std

    test_dataset = reformat(images)
    yP = model.predict(test_dataset)
    np.save('testProbs', yP)
    print('Completed processing {} test images'.format(str(image_id.shape[0])))
    write_results_to_file(species, image_id, yP)
def load_model():
    try:
        json_file = open('model.json', 'r')
        loaded_model_json = json_file.read()
        json_file.close()
        model = model_from_json(loaded_model_json)
        model.load_weights("weights.hdf5")
        print("Model successfully loaded from disk.")
        # compile again
        model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
        return model
    except:
        print("""Model not found. Please train the CNN by running the script
cnn_train.py. Note that the training and test samples should be properly set
up in the dataset directory.""")
        return None
def unserialize(self, d, epoch=None, load_everything=True):
    self.model = model_from_json(open(op.join(d, 'model.json')).read())
    weights_file = ('weights.h5' if epoch is None
                    else 'weights_epoch%d.h5' % epoch)
    self.model.load_weights(op.join(d, weights_file))
    if load_everything:
        class_data_file = op.join(d, 'class_data.pickle')
        if op.exists(class_data_file):
            class_data = pickle.load(open(class_data_file, 'rb'))
            for x in self.serialized_attributes:
                setattr(self, x, class_data[x])
        test_results = op.join(d, 'test_results.csv')
        if os.path.exists(test_results):
            self.test_results = pd.read_csv(test_results)
        training_history = op.join(d, 'training_history.json')
        if os.path.exists(training_history):
            self.training_history = json.load(open(training_history))
def __init__(self, architecture_file=None, weight_file=None, optimizer=None):
    # Generate mapping for softmax layer to characters
    output_str = '0123456789abcdefghijklmnopqrstuvwxyz '
    self.output = [x for x in output_str]
    self.L = len(self.output)

    # Load model and saved weights
    from keras.models import model_from_json
    if architecture_file is None:
        self.model = model_from_json(open('char2_architecture.json').read())
    else:
        self.model = model_from_json(open(architecture_file).read())

    if weight_file is None:
        self.model.load_weights('char2_weights.h5')
    else:
        self.model.load_weights(weight_file)

    if optimizer is None:
        from keras.optimizers import SGD
        optimizer = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
    self.model.compile(loss='categorical_crossentropy', optimizer=optimizer)
def load_model(model):
    # Load model
    _, model_basename = split(model.rstrip(sep))
    model_arch = join(model, model_basename + '_architecture.json')
    model_weights = join(model, model_basename + '_best_weights.h5')
    if not isfile(model_arch):
        raise IOError('Unable to locate model architecture (.json)')
    if not isfile(model_weights):
        raise IOError('Unable to locate model weights (.h5)')
    model = model_from_json(open(model_arch).read())
    model.load_weights(model_weights)
    return model
def build_model(model_in=None, model_out=None):
    if model_in:
        print("Reading model...")
        with open(model_in + '.json', 'r') as model_file:
            model = model_from_json(model_file.read())
    else:
        print('Build model...')
        model = Sequential()
        model.add(LSTM(64, return_sequences=True, input_shape=(seqlen, len(chars))))
        model.add(Dropout(0.2))
        model.add(LSTM(64, return_sequences=False))
        model.add(Dropout(0.2))
        model.add(Dense(len(chars)))
        model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    if model_out:
        print("Saving model...")
        with open(model_out + '.json', 'w') as model_file:
            model_file.write(model.to_json())
    return model
def load_model(self, model_name):
    '''
    Load a model
    INPUT   (1) string 'model_name': filepath to model and weights, not
                including extension
    OUTPUT: Model with loaded weights. Can fit on model using
            loaded_model=True in the fit_model method
    '''
    print('Loading model {}'.format(model_name))
    model_toload = '{}.json'.format(model_name)
    weights = '{}.hdf5'.format(model_name)
    with open(model_toload) as f:
        m = next(f)
    model_comp = model_from_json(json.loads(m))
    model_comp.load_weights(weights)
    print('Model loaded.')
    self.model = model_comp
    return model_comp
def get_reconst_from_embed(self, embed, node_l=None, filesuffix=None):
    if filesuffix is None:
        if node_l is not None:
            return self._decoder.predict(
                embed, batch_size=self._n_batch)[:, node_l]
        else:
            return self._decoder.predict(embed, batch_size=self._n_batch)
    else:
        try:
            decoder = model_from_json(
                open('decoder_model_' + filesuffix + '.json').read()
            )
        except:
            print('Error reading file: {0}. Cannot load previous model'.format(
                'decoder_model_' + filesuffix + '.json'))
            exit()
        try:
            decoder.load_weights('decoder_weights_' + filesuffix + '.hdf5')
        except:
            print('Error reading file: {0}. Cannot load previous weights'.format(
                'decoder_weights_' + filesuffix + '.hdf5'))
            exit()
        if node_l is not None:
            return decoder.predict(embed, batch_size=self._n_batch)[:, node_l]
        else:
            return decoder.predict(embed, batch_size=self._n_batch)
def model_predict(X, pipeline):
    if model_type == "mlp":
        json_file = open(projectfolder + '/model.json', 'r')
        loaded_model_json = json_file.read()
        json_file.close()
        model = model_from_json(loaded_model_json)
        model.load_weights(projectfolder + "/weights.hdf5")
        model.compile(loss=pipeline['options']['loss'],
                      optimizer=pipeline['options']['optimizer'],
                      metrics=pipeline['options']['scoring'])
        if type(X) is pandas.DataFrame:
            X = X.values
        Y = model.predict(X)
    else:
        picklefile = projectfolder + "/model.out"
        with open(picklefile, "rb") as f:
            model = pickle.load(f)
        Y = model.predict(X)
    return Y
def load_model(architecture_fp, weights_fp):
    """
    Load architecture and weights for a Keras model

    Args:
        architecture_fp (str): File path to the model architecture (in JSON format)
        weights_fp (str): File path to the model weights (in HDF5 format)

    Returns:
        A Keras trained model
    """
    # Load architecture
    with open(architecture_fp, 'r') as architecture_file:
        model_architecture = json.load(architecture_file)
    loaded_model = model_from_json(model_architecture)

    # Load weights
    loaded_model.load_weights(weights_fp)
    return loaded_model
def readModel(path):
    """
    Loads the model found at path.

    Parameters
    ----------
    path: str
        The path to the model to be loaded. Note that the end of the path
        should give the name of the network but not the file extensions,
        which are added automatically.

    Returns
    -------
    Model
        A Keras neural network with the structure and weights of the one
        found at path.
    """
    model = model_from_json(open(os.path.join(path + 'Structure.json')).read())
    model.load_weights(os.path.join(path + 'Weights.h5'))
    return model
def loadModel(self, path):
    """
    Loads the neural network saved at path.

    Parameters
    ----------
    path: string
        The path to the stored model.

    Returns: Keras.models.Sequential
        The neural network, compiled and ready to run.
    """
    model = model_from_json(open(os.path.join(path + 'Structure.json')).read())
    model.load_weights(os.path.join(path + 'Weights.h5'))
    model.compile(optimizer='adadelta', loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
def main(img_dir):
    img = get_img(img_dir).astype('float32')
    img /= 255.

    # Getting model:
    model_file = open('Data/Model/model.json', 'r')
    model = model_file.read()
    model_file.close()
    model = model_from_json(model)
    # Getting weights
    model.load_weights("Data/Model/weights.h5")

    Y = predict(model, img)
    name = 'segmented.jpg'
    save_img(Y, name)
    print('Segmented image saved as ' + name)
def logoPredictor(path, rows, cols):
    model = model_from_json(open(path + 'logo_architecture.json').read())
    model.load_weights(path + 'logo_weights.h5')
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

    imgs, imgNames = loadImgs(path + 'logo_test/', rows, cols)
    imgs = imgs.reshape(imgs.shape[0], 1, rows, cols)
    classes = model.predict_classes(imgs)

    _, model_Dict = modelDict(path)
    output = []
    for cls in classes:
        # look up the label whose value matches the predicted class index
        output.append(list(model_Dict.keys())[list(model_Dict.values()).index(int(cls))])
    return output  # a list of predicted labels
def load_model():
    global classifier
    model_file = open('Trex_game/model.json', 'r')
    loaded_model = model_file.read()
    model_file.close()
    classifier = model_from_json(loaded_model)
    # load weights into new model
    classifier.load_weights("Trex_game/weights.hdf5")
    print("Model loaded successfully")  # get ready to roll
def read_model(cross=''):
    json_name = '../src/cache/architecture_4_50_2017-07-06-07-46.json'
    weight_name = '../src/cache/resnet_full_2017-07-07-08-40.hdf5'
    model = model_from_json(open(json_name).read())
    model.load_weights(weight_name)
    return model
def read_model(cross=''):
    json_name = 'architecture_' + cross + '.json'
    weight_name = 'model_weights_' + cross + '.h5'
    model = model_from_json(open(os.path.join('../src/cache', json_name)).read())
    model.load_weights(os.path.join('../src/cache', weight_name))
    return model
def read_model(cross=''):
    json_name = '../src/cache/architecture_24_50_2017-07-03-14-07.json'
    weight_name = '../src/cache/resnet_full_2017-07-04-11-57.hdf5'
    model = model_from_json(open(json_name).read())
    model.load_weights(weight_name)
    return model
def _load_model_config(model_cfg, model_weights):
    if type(model_cfg) == str:
        if not os.path.exists(model_cfg):
            try:
                class_ = getattr(applications, model_cfg)
                return class_(weights=model_weights)
            except AttributeError:
                available_mdls = [attr for attr in dir(applications)
                                  if callable(getattr(applications, attr))]
                raise ValueError('Could not load pretrained model with key {}. '
                                 'Available models: {}'.format(model_cfg, ', '.join(available_mdls)))
        # read the file once; model_from_json/model_from_yaml expect the
        # contents as a string, not a file handle
        with open(model_cfg, 'r') as fileh:
            config_str = fileh.read()
        try:
            return model_from_json(config_str)
        except ValueError:
            pass
        try:
            return model_from_yaml(config_str)
        except ValueError:
            pass
        raise ValueError('Could not load model from configuration file {}. '
                         'Make sure the path is correct and the file format is yaml or json.'.format(model_cfg))
    elif type(model_cfg) == dict:
        return Model.from_config(model_cfg)
    elif type(model_cfg) == list:
        return Sequential.from_config(model_cfg)
    raise ValueError('Could not load model from configuration object of type {}.'.format(type(model_cfg)))
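For context, a few illustrative calls that exercise each branch of the loader above; the argument values are hypothetical:

# Hypothetical usage of _load_model_config, one call per branch:
model = _load_model_config('ResNet50', 'imagenet')    # keras.applications model by name
model = _load_model_config('model.json', None)        # architecture file on disk
model = _load_model_config(model.get_config(), None)  # config dict from an existing Model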
def load_model(saved_model):
    '''Load Model

    WHAT: Loads a saved model and makes it available for
    prediction use by predictor().
    '''
    json_file = open(saved_model + ".json", 'r')
    loaded_model_json = json_file.read()
    json_file.close()
    loaded_model = model_from_json(loaded_model_json)

    # load weights into new model
    loaded_model.load_weights(saved_model + '.h5')

    f = open(saved_model + ".x", 'r')
    temp = f.read()
    try:
        X = list(map(int, temp.split()[:-1]))
    except ValueError:
        X = temp.split()[:-1]
    try:
        flatten = float(temp.split()[-1])
    except ValueError:
        flatten = temp.split()[-1]
    f.close()

    if type(X) == list and len(X) == 1:
        X = X[0]

    return loaded_model, X, flatten
def model_trainer(fileModelJSON):
    print("Model Trainer Thread Starting...")
    fileWeights = fileModelJSON.replace('json', 'h5')

    with open(fileModelJSON, 'r') as jfile:
        model = model_from_json(json.load(jfile))
    adam = Adam(lr=0.00001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    model.compile(optimizer=adam, loss="mse", metrics=['accuracy'])
    model.load_weights(fileWeights)
    print("Loaded model from disk:")
    model.summary()

    # start training loop...
    while 1:
        if len(X) > 100:
            batch_size = 20
            samples_per_epoch = int(len(X) / batch_size)
            val_size = int(samples_per_epoch / 10)
            if val_size < 10:
                val_size = 10
            nb_epoch = 100

            history = model.fit_generator(batchgen(X, Y),
                                          samples_per_epoch=samples_per_epoch,
                                          nb_epoch=nb_epoch,
                                          validation_data=batchgen(X, Y),
                                          nb_val_samples=val_size,
                                          verbose=1)

            print("Saving model to disk: ", fileModelJSON, "and", fileWeights)
            if Path(fileModelJSON).is_file():
                os.remove(fileModelJSON)
            json_string = model.to_json()
            with open(fileModelJSON, 'w') as f:
                json.dump(json_string, f)
            if Path(fileWeights).is_file():
                os.remove(fileWeights)
            model.save_weights(fileWeights)
        else:
            print("Not Ready! Sleeping for 5...")
            sleep(5)
def load_model(model_name, yaml=True):
    """
    :rtype: object
    """
    if yaml:
        ext = '.yaml'
        model = model_from_yaml(open(model_name + ext).read())
    else:
        ext = '.json'
        model = model_from_json(open(model_name + ext).read())
    model.load_weights(model_name + '_weights.h5')
    # model.compile(loss='mean_squared_error', optimizer='rmsprop', metrics=['accuracy'])
    print("model " + model_name + " loaded")
    return model
def get_pretrained_squeezenet(model_path, weights_path):
    model = model_from_json(open(model_path).read())
    model.load_weights(weights_path)
    return model
def get_pretrained_squeezenet(model_path, weights_path):
    model = model_from_json(open(model_path).read())
    model.load_weights(weights_path)
    return model

# Experimental network design for small images!
def _load_model(self, model_network_path, model_weight_path):
    """Load a keras model from disk

    Parameters
    ----------
    model_network_path: str
        Path to the model network (json file)

    model_weight_path: str
        Path to the model network weights (HDF5 file)

    Returns
    -------
    model: A keras model
    """
    from keras.models import model_from_json

    # Load the model network
    json_file = open(model_network_path, 'r')
    loaded_model_json = json_file.read()
    json_file.close()

    # Load the model weights; relu6 and DepthwiseConv2D are not core Keras
    # layers, so they must be supplied through custom_objects
    loaded_model = model_from_json(loaded_model_json, custom_objects={
        'relu6': _keras.applications.mobilenet.relu6,
        'DepthwiseConv2D': _keras.applications.mobilenet.DepthwiseConv2D})

    if model_weight_path is not None:
        if os.path.isfile(model_weight_path):
            loaded_model.load_weights(model_weight_path)
            self.weight_loaded = True
            print("Network file [{}] and [{}] loaded successfully.".format(
                model_network_path, model_weight_path))
        else:
            print("Warning: weights file [%s] was not found." % (model_weight_path))

    return loaded_model
def _load_keras_model(model_network_path, model_weight_path):
    """Load a keras model from disk

    Parameters
    ----------
    model_network_path: str
        Path to the model network (json file)

    model_weight_path: str
        Path to the model network weights (HDF5 file)

    Returns
    -------
    model: A keras model
    """
    from keras.models import model_from_json

    # Load the model network
    json_file = open(model_network_path, 'r')
    loaded_model_json = json_file.read()
    json_file.close()

    # Load the model weights
    loaded_model = model_from_json(loaded_model_json)
    loaded_model.load_weights(model_weight_path)

    return loaded_model
def _load_keras_model(model_network_path, model_weight_path):
    """Load a keras model from disk

    Parameters
    ----------
    model_network_path: str
        Path to the model network (json file)

    model_weight_path: str
        Path to the model network weights (HDF5 file)

    Returns
    -------
    model: A keras model
    """
    from keras.models import model_from_json
    import json

    # Load the model network; here the file stores a JSON-encoded string,
    # so it is decoded with json.loads() before model_from_json()
    json_file = open(model_network_path, 'r')
    json_string = json_file.read()
    json_file.close()
    loaded_model_json = json.loads(json_string)

    # Load the model weights
    loaded_model = model_from_json(loaded_model_json)
    loaded_model.load_weights(model_weight_path)

    return loaded_model
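The two loaders above differ only in how the architecture file was written. A sketch of both save conventions, assuming a Keras model object is in scope (file names are illustrative):

import json

json_string = model.to_json()

# (a) raw string on disk -> read it and pass it to model_from_json()
#     directly, as in the first loader above
with open('model_raw.json', 'w') as f:
    f.write(json_string)

# (b) JSON-encoded string on disk (written with json.dump()) -> the file
#     contents must go through json.loads() first, as in the second loader
with open('model_encoded.json', 'w') as f:
    json.dump(json_string, f)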
def load_model(self, json_model_file, h5_model_file):
    #### load the model ####
    json_file = open(json_model_file, 'r')
    loaded_model_json = json_file.read()
    json_file.close()
    loaded_model = model_from_json(loaded_model_json)
    loaded_model.load_weights(h5_model_file)
    print("Loaded model from disk")

    #### compile the model ####
    self.model = loaded_model
    self.compile_model()
def __init__(self, *args, **kwds):
    BaseEngine.__init__(self, *args, **kwds)
    self.model = model_from_json(open(model_filename).read())
    # self.model.compile(loss='categorical_crossentropy', optimizer='adam')
    self.model.load_weights(weights_filename)
def load_model(self):
    try:
        model_file = open('model2.json', 'r')
        loaded_model = model_file.read()
        model_file.close()
        model = model_from_json(loaded_model)
        # load weights into new model
        model.load_weights("weights2.hdf5")
        print("Model loaded successfully")
        return model
    except:
        print("Error importing the model. Make sure the model has been trained\n"
              "and the model.json and weights.hdf5 files are in place, and try again.")
        os._exit(0)
def get(self):
    batch_size = 1
    rolling_window_size = 5

    # r = requests.get('http://localhost:5000/api/v1/neural_data')  # Call neural_data
    # print(r)
    Neural_Data.get(Neural_Data)

    data = list(mongo.db.neural_data.find({}).sort('timestamp', -1).limit(5))
    arr = create_rolling_window(data, rolling_window_size)
    X = np.array([arr])
    X = np.reshape(X, (X.shape[0], 1, X.shape[1]))

    json_file = open('model.json', 'r')
    loaded_model_json = json_file.read()
    json_file.close()
    model = model_from_json(loaded_model_json)
    # load weights into new model
    model.load_weights("model.h5")
    print("Loaded model from disk")

    prediction = model.predict(X, batch_size=batch_size, verbose=1)
    prediction = prediction.tolist()[0][0]
    print(colored.green("Made new prediction... and it's {}".format(prediction)))
    prediction = round(prediction, 0)

    result = {'price_growth': True}
    if prediction == 0:
        result["price_growth"] = False
        print('Price will fall')
    else:
        print('Price will grow')

    return jsonify(result)