We extracted the following 50 code examples from open-source Python projects to illustrate how to use keras.preprocessing.image.load_img().
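Most of the examples below follow the same pattern: load_img() reads an image from disk (optionally resizing it via target_size), img_to_array() converts the resulting PIL image to a NumPy array, np.expand_dims() adds the batch dimension, and a model-specific preprocess_input() normalizes the pixels before prediction. The following is a minimal sketch of that pipeline; the file name 'example.jpg' and the choice of ResNet50 are placeholders for illustration, not taken from the examples below.

import numpy as np
from keras.preprocessing.image import load_img, img_to_array
from keras.applications.resnet50 import ResNet50, preprocess_input, decode_predictions

model = ResNet50(weights='imagenet')

img = load_img('example.jpg', target_size=(224, 224))  # PIL image, resized to the model's input size
x = img_to_array(img)                                   # (224, 224, 3) float array
x = np.expand_dims(x, axis=0)                           # add batch dimension -> (1, 224, 224, 3)
x = preprocess_input(x)                                 # model-specific normalization
preds = model.predict(x)
print(decode_predictions(preds, top=3)[0])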
def testTHPrediction(self):
    keras.backend.set_image_dim_ordering('th')
    model = SqueezeNet()
    img = image.load_img('images/cat.jpeg', target_size=(227, 227))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    preds = model.predict(x)
    decoded_preds = decode_predictions(preds)
    #print('Predicted:', decoded_preds)
    self.assertIn(decoded_preds[0][0][1], 'tabby')
    #self.assertAlmostEqual(decode_predictions(preds)[0][0][2], 0.82134342)
def extract(self, image_path):
    img = image.load_img(image_path, target_size=(299, 299))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)

    # Get the prediction.
    features = self.model.predict(x)

    if self.weights is None:
        # For imagenet/default network:
        features = features[0]
    else:
        # For loaded network:
        features = features[0]

    return features
def ext_img_feat(image_folder, batch_size):
    base_model = ResNet50(weights='imagenet')
    img_model = Model(input=base_model.input, output=base_model.get_layer('res5c').output)

    img_list = os.listdir(image_folder)
    all_img_feats = list()
    si = 0
    while si < len(img_list):
        batch_img = img_list[si:si+batch_size]
        si += batch_size
        imgs = []
        for imgf in batch_img:
            img_path = os.path.join(image_folder, imgf)
            img = image.load_img(img_path, target_size=(224, 224))
            x = image.img_to_array(img)
            x = np.expand_dims(x, axis=0)
            x = preprocess_input(x)
            imgs.append(x)
        imgs = np.concatenate(imgs, axis=0)
        img_feats = img_model.predict(imgs)
        all_img_feats.append(img_feats)
        print('%d images extracted\r' % si),
def extract_feature(dir_path, net):
    features = []
    infos = []
    num = 0
    for image_name in os.listdir(dir_path):
        arr = image_name.split('_')
        person = int(arr[0])
        camera = int(arr[1][1])
        image_path = os.path.join(dir_path, image_name)
        img = image.load_img(image_path, target_size=(224, 224))
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)
        feature = net.predict(x)
        features.append(np.squeeze(feature))
        infos.append((person, camera))
    return features, infos


# use GPU to calculate the similarity matrix
def preprocess_image_crop(image_path, img_size):
    '''
    Preprocess the image scaling it so that its smaller size is img_size.
    The larger size is then cropped in order to produce a square image.
    '''
    img = load_img(image_path)
    scale = float(img_size) / min(img.size)
    new_size = (int(np.ceil(scale * img.size[0])), int(np.ceil(scale * img.size[1])))
    # print('old size: %s, new size: %s' % (str(img.size), str(new_size)))
    img = img.resize(new_size, resample=Image.BILINEAR)
    img = img_to_array(img)
    crop_h = img.shape[0] - img_size
    crop_v = img.shape[1] - img_size
    img = img[crop_h:img_size+crop_h, crop_v:img_size+crop_v, :]
    img = np.expand_dims(img, axis=0)
    img = vgg16.preprocess_input(img)
    return img


# util function to open, resize and format pictures into appropriate tensors
def preprocess_image_scale(image_path, img_size=None):
    '''
    Preprocess the image scaling it so that its larger size is max_size.
    This function preserves aspect ratio.
    '''
    img = load_img(image_path)
    if img_size:
        scale = float(img_size) / max(img.size)
        new_size = (int(np.ceil(scale * img.size[0])), int(np.ceil(scale * img.size[1])))
        img = img.resize(new_size, resample=Image.BILINEAR)
    img = img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = vgg16.preprocess_input(img)
    return img


# util function to convert a tensor into a valid image
def test_bare_keras_module(self):
    """ Keras GraphFunctions should give the same result as standard Keras models """
    img_fpaths = glob(os.path.join(_getSampleJPEGDir(), '*.jpg'))

    for model_gen, preproc_fn in [(InceptionV3, iv3.preprocess_input),
                                  (Xception, xcpt.preprocess_input),
                                  (ResNet50, rsnt.preprocess_input)]:

        keras_model = model_gen(weights="imagenet")
        target_size = tuple(keras_model.input.shape.as_list()[1:-1])

        _preproc_img_list = []
        for fpath in img_fpaths:
            img = load_img(fpath, target_size=target_size)
            # WARNING: must apply expand dimensions first, or ResNet50 preprocessor fails
            img_arr = np.expand_dims(img_to_array(img), axis=0)
            _preproc_img_list.append(preproc_fn(img_arr))

        imgs_input = np.vstack(_preproc_img_list)

        preds_ref = keras_model.predict(imgs_input)
        gfn_bare_keras = GraphFunction.fromKeras(keras_model)

        with IsolatedSession(using_keras=True) as issn:
            K.set_learning_phase(0)
            feeds, fetches = issn.importGraphFunction(gfn_bare_keras)
            preds_tgt = issn.run(fetches[0], {feeds[0]: imgs_input})

        self.assertTrue(np.all(preds_tgt == preds_ref))
def _load_img_pair(self, idx, load_from_memory):
    """Get a pair of images with index idx."""
    if load_from_memory:
        a = self.a[idx]
        b = self.b[idx]
        return a, b

    fname = self.filenames[idx]

    a = load_img(os.path.join(self.a_dir, fname),
                 grayscale=self.is_a_grayscale,
                 target_size=self.target_size)
    b = load_img(os.path.join(self.b_dir, fname),
                 grayscale=self.is_b_grayscale,
                 target_size=self.target_size)

    a = img_to_array(a, self.dim_ordering)
    b = img_to_array(b, self.dim_ordering)

    return a, b
def test():
    classes = []
    for subdir in sorted(os.listdir('data/train')):
        if os.path.isdir(os.path.join('data/train', subdir)):
            classes.append(subdir)
    m = genmodel()
    m.load_weights('weights.model')
    image = load_img('data/predict/c.png', target_size=(48, 48)).convert('L')
    x = img_to_array(image)
    x = x.reshape((1,) + x.shape)
    k = m.predict(x)[0]
    ks = k.argsort()
    l = classes
    print(ks[-1])
    print(l[ks[-1]], l[ks[-2]], l[ks[-3]])
def generate_data():
    while 1:
        X_Data = np.empty([0, 224, 224, 3])
        Y_Data = np.empty([0])
        file_idx = 2200
        img_path = '../data/%d.tif' % file_idx
        csv_path = '../data/%d.csv' % file_idx
        while os.path.isfile(img_path) and os.path.isfile(csv_path):
            print(img_path + " " + csv_path)
            img = image.load_img(img_path, target_size=(224, 224))
            x = image.img_to_array(img)
            x = np.expand_dims(x, axis=0)
            x = preprocess_input(x)
            X_Data = np.append(X_Data, x, axis=0)
            print(img_path)
            # if (file_idx % 20) == 19:
            yield(X_Data)
            X_Data = np.empty([0, 224, 224, 3])
            file_idx = file_idx + 1
            img_path = '../data/%d.tif' % file_idx
            csv_path = '../data/%d.csv' % file_idx
def imagefromlist(testlist):
    sock.sendall('(448, 448, 3)')
    received = sock.recv(1024)
    #print received
    #
    f = open(testlist)
    for img_path in f:
        timg = image.load_img(img_path.strip(), target_size=(448, 448))
        xx = image.img_to_array(timg)
        try:
            (orgh, orgw, c) = xx.shape
            #print xx.shape
            data = xx.reshape(448*448*3).astype(np.uint8)
            sendimg(sock, data)
            time.sleep(3)
        except:
            continue
    sock.close()
def load_array_image(paths, mode, kernel=(128, 128), img_filter='zoom', channels=3,
                     model=None, zoom_learn=2, resize=False, **kwargs):
    """
    3 channels as 3 batch datas
    :param path:
    :return:
    """
    l = []
    t = []
    for f in paths:
        print(f)
        img = load_img(f)
        if resize:
            img = img.resize(kernel)
        image_name = f.split('/')[-1]
        process_image(img, kernel, l, t, mode, img_filter=img_filter, channels=channels,
                      model=model, image_name=image_name, **kwargs)
    return np.array(l), np.array(t)
def predict(imagepath, target_x, target_y, name, model):
    if imagepath.startswith('http://') or imagepath.startswith('https://') or imagepath.startswith('ftp://'):
        response = requests.get(imagepath)
        img = Image.open(BytesIO(response.content))
        img = img.resize((target_x, target_y))
    else:
        if not os.path.exists(imagepath):
            raise Exception('Input image file does not exist')
        img = image.load_img(imagepath, target_size=(target_x, target_y))

    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = processInputImage(name, x)
    preds = decodePrediction(name, model.predict(x))
    result = []
    for p in preds[0]:
        result.append({"synset": p[0],
                       "text": p[1],
                       "prediction": float("{0:.2f}".format((p[2] * 100)))})
    return json.loads(jsonpickle.encode(result, unpicklable=False))
def main():
    # Load the trained model.
    model = VGG19(weights='imagenet')
    print_layers(model)

    # load and preprocess image
    img_path = 'elephant.jpg'
    img = image.load_img(img_path, target_size=(224, 224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)

    # predict the class probabilities
    preds = model.predict(x)

    # decode the results into a list of tuples (class, description, probability)
    # (one such list for each sample in the batch)
    print('--------------------------------')
    print('Predicted:', decode_predictions(preds, top=3)[0])
def split_datasets(label, valid_ratio=0.1, max_num=args.fill_up_to*0.1, per_class=True):
    class_directory = os.path.join(args.samples, label)
    if os.path.isdir(class_directory):
        imgs = [i for i in os.listdir(class_directory) if i.endswith('jpg') or i.endswith('jpeg')]
        random.shuffle(imgs)
        validation_set = imgs[:int(min(len(imgs)*valid_ratio, max_num))]
        train_set = imgs[int(min(len(imgs)*valid_ratio, max_num)):]
        cu.create_dir_if_not_exists(os.path.join(args.samples_out, 'valid', label))
        for i in validation_set:
            from_img = os.path.join(args.samples, label, i)
            to_path = os.path.join(args.samples_out, 'valid', label)
            if not args.crop:
                shutil.copy(from_img, to_path)
            else:
                img = load_img(from_img)
                width = img.size[0]
                height = int(img.size[1] * 0.1)
                to_img = os.path.join(to_path, i)
                img.crop((0, 0, width, height)).save(to_img)
        print('Label %s copied %d samples to validation set.' % (label, len(validation_set)))
        return (train_set, validation_set)
    else:
        return ([], [])
def create_test_data(self):
    # save the test images as a .npy file
    i = 0
    print('-' * 30)
    print('Creating training images...')
    print('-' * 30)
    imgs = glob.glob(self.test_path + "/*." + self.img_type)  # deform/train
    print(len(imgs))
    imgdatas = np.ndarray((len(imgs), self.out_rows, self.out_cols, 1), dtype=np.uint8)
    for imgname in imgs:
        midname = imgname[imgname.rindex("/") + 1:]  # file name without the directory
        img = load_img(self.test_path + "/" + midname, grayscale=True)  # load as grayscale
        img = img_to_array(img)
        imgdatas[i] = img
        if i % 100 == 0:
            print('Done: {0}/{1} images'.format(i, len(imgs)))
        i += 1
    print('loading done', imgdatas.shape)
    np.save(self.npy_path + '/imgs_test.npy', imgdatas)
    # np.save(self.npy_path + '/imgs_mask_train.npy', imglabels)
    print('Saving to .npy files done.')
def load_dataset(filedir):
    """
    Load the dataset.
    :param filedir:
    :return:
    """
    image_data_list = []
    label = []
    train_image_list = os.listdir(filedir + '/train')
    for img in train_image_list:
        url = os.path.join(filedir + '/train/' + img)
        image = load_img(url, target_size=(128, 128))
        image_data_list.append(img_to_array(image))
        label.append(img.split('-')[0])
    img_data = np.array(image_data_list)
    img_data = img_data.astype('float32')
    img_data /= 255
    return img_data, label
def get_all_images(image_names, path_voc):
    images = []
    for j in range(np.size(image_names)):
        image_name = image_names[0][j]
        string = path_voc + '/JPEGImages/' + image_name + '.jpg'
        images.append(image.load_img(string, False))
    return images
def get_all_images_pool(image_names, path_voc):
    images = []
    for j in range(np.size(image_names)):
        image_name = image_names[j]
        string = path_voc + '/JPEGImages/' + image_name + '.jpg'
        images.append(image.load_img(string, False))
    return images
def preprocessImage(imagePath):
    img = load_img(imagePath, target_size=(244, 244))
    img = img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = vgg16.preprocess_input(img)
    img = img.reshape(img.shape[1:])
    return img
def preprocess_image(image_path):
    img = load_img(image_path, target_size=(im_height, im_width))
    img = img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = vgg16.preprocess_input(img)
    return img
def load_and_process(img_path, target_size=None):
    # Feed in the image, convert to array
    img = load_img(img_path, target_size=target_size)
    img = img_to_array(img)
    # Add the batch dimension
    img = np.expand_dims(img, axis=0)
    # Perform the usual ImageNet preprocessing
    img = preprocess_input(img)
    return img
def loadImage(path):
    img = image.load_img(path[0], target_size=(299, 299))
    x = image.img_to_array(img)
    x /= 127.5
    x -= 1
    return x
def preprocess_image(image_path):
    img = load_img(image_path, target_size=(img_nrows, img_ncols))
    img = img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = vgg16.preprocess_input(img)
    return img


# util function to convert a tensor into a valid image
def preprocess_image(image_path):
    img = load_img(image_path, target_size=(img_nrows, img_ncols))
    img = img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = vgg19.preprocess_input(img)
    return img
def load_mask_labels():
    '''Load both target and style masks.
    A mask image (nr x nc) with m labels/colors will be loaded
    as a 4D boolean tensor: (1, m, nr, nc) for 'th' or (1, nr, nc, m) for 'tf'
    '''
    target_mask_img = load_img(target_mask_path,
                               target_size=(img_nrows, img_ncols))
    target_mask_img = img_to_array(target_mask_img)
    style_mask_img = load_img(style_mask_path,
                              target_size=(img_nrows, img_ncols))
    style_mask_img = img_to_array(style_mask_img)
    if K.image_dim_ordering() == 'th':
        mask_vecs = np.vstack([style_mask_img.reshape((3, -1)).T,
                               target_mask_img.reshape((3, -1)).T])
    else:
        mask_vecs = np.vstack([style_mask_img.reshape((-1, 3)),
                               target_mask_img.reshape((-1, 3))])

    labels = kmeans(mask_vecs, nb_labels)
    style_mask_label = labels[:img_nrows * img_ncols].reshape((img_nrows, img_ncols))
    target_mask_label = labels[img_nrows * img_ncols:].reshape((img_nrows, img_ncols))

    stack_axis = 0 if K.image_dim_ordering() == 'th' else -1
    style_mask = np.stack([style_mask_label == r for r in xrange(nb_labels)],
                          axis=stack_axis)
    target_mask = np.stack([target_mask_label == r for r in xrange(nb_labels)],
                           axis=stack_axis)

    return (np.expand_dims(style_mask, axis=0),
            np.expand_dims(target_mask, axis=0))


# Create tensor variables for images
def preprocess_image(image_path):
    img = load_img(image_path, target_size=(img_width, img_height))
    img = img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = vgg16.preprocess_input(img)
    return img


# util function to convert a tensor into a valid image
def get_faces():
    X_train = np.concatenate((
        [img_to_array(load_img('fine_tune/faces/zero/' + filename, target_size=(224, 224)))
         for filename in os.listdir('fine_tune/faces/zero/')],
        [img_to_array(load_img('fine_tune/faces/one/' + filename, target_size=(224, 224)))
         for filename in os.listdir('fine_tune/faces/one/')]))
    Y_train = np.concatenate((
        np.zeros(len(os.listdir('fine_tune/faces/zero/'))),
        np.ones(len(os.listdir('fine_tune/faces/one/')))))
    assert len(X_train) == len(Y_train)
    perm = np.random.permutation(len(X_train))
    return (X_train[perm], Y_train[perm])
def process_image(image, target_shape):
    """Given an image, process it and return the array."""
    # Load the image.
    h, w, _ = target_shape
    image = load_img(image, target_size=(h, w))

    # Turn it into numpy, normalize and return.
    img_arr = img_to_array(image)
    x = (img_arr / 255.).astype(np.float32)

    return x
def load_mask_labels():
    '''Load both target and style masks.
    A mask image (nr x nc) with m labels/colors will be loaded
    as a 4D boolean tensor:
    (1, m, nr, nc) for 'channels_first' or (1, nr, nc, m) for 'channels_last'
    '''
    target_mask_img = load_img(target_mask_path,
                               target_size=(img_nrows, img_ncols))
    target_mask_img = img_to_array(target_mask_img)
    style_mask_img = load_img(style_mask_path,
                              target_size=(img_nrows, img_ncols))
    style_mask_img = img_to_array(style_mask_img)
    if K.image_data_format() == 'channels_first':
        mask_vecs = np.vstack([style_mask_img.reshape((3, -1)).T,
                               target_mask_img.reshape((3, -1)).T])
    else:
        mask_vecs = np.vstack([style_mask_img.reshape((-1, 3)),
                               target_mask_img.reshape((-1, 3))])

    labels = kmeans(mask_vecs, num_labels)
    style_mask_label = labels[:img_nrows * img_ncols].reshape((img_nrows, img_ncols))
    target_mask_label = labels[img_nrows * img_ncols:].reshape((img_nrows, img_ncols))

    stack_axis = 0 if K.image_data_format() == 'channels_first' else -1
    style_mask = np.stack([style_mask_label == r for r in xrange(num_labels)],
                          axis=stack_axis)
    target_mask = np.stack([target_mask_label == r for r in xrange(num_labels)],
                           axis=stack_axis)

    return (np.expand_dims(style_mask, axis=0),
            np.expand_dims(target_mask, axis=0))


# Create tensor variables for images
def preprocess_image(image_path):
    # Util function to open, resize and format pictures
    # into appropriate tensors.
    img = load_img(image_path)
    img = img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = inception_v3.preprocess_input(img)
    return img
def procLine2(l, r):
    img_path = '{0}/{1}'.format(r, l['filename'])
    try:
        img = image.load_img(img_path, target_size=(224, 224))
        return (True, l['filename'])
    except:
        return (False, l['filename'])
def _save_multi_cropped_imgs(src, dest):
    image = load_img(src)
    image, crop_coordinates = _prepare_image_for_cropping(image)
    dest_no_ext, ext = splitext(dest)

    for i, crop_position in enumerate(crop_coordinates):
        dest_i = "{:s}_{:d}{:s}".format(dest_no_ext, i, ext)
        cropped_img = image.crop(box=crop_position)
        assert cropped_img.size == IMGS_DIM_2D, \
            'Cropped image dimension is {:s}, instead of {:s}'\
            .format(cropped_img.size, IMGS_DIM_2D)
        cropped_img.save(dest_i)
def _save_scaled_cropped_img(src, dest):
    image = load_img(src)
    image = fit(image, IMGS_DIM_2D, method=LANCZOS)
    image.save(dest)
    return image
def load_img_arr(p):
    return img_to_array(load_img(p))
def loadAndPreprocessKerasInceptionV3(raw_uri):
    # this is the canonical way to load and prep images in keras
    uri = raw_uri[5:] if raw_uri.startswith("file:/") else raw_uri
    image = img_to_array(load_img(uri, target_size=InceptionV3Constants.INPUT_SHAPE))
    image = np.expand_dims(image, axis=0)
    return preprocess_input(image)
def _loadImageViaKeras(self, raw_uri):
    uri = raw_uri[5:] if raw_uri.startswith("file:/") else raw_uri
    image = img_to_array(load_img(uri))
    image = np.expand_dims(image, axis=0)
    return preprocess_input(image)
def test_spimage_converter_module(self):
    """ spimage converter module must preserve original image """
    img_fpaths = glob(os.path.join(_getSampleJPEGDir(), '*.jpg'))

    def exec_gfn_spimg_decode(spimg_dict, img_dtype):
        gfn = gfac.buildSpImageConverter('BGR', img_dtype)
        with IsolatedSession() as issn:
            feeds, fetches = issn.importGraphFunction(gfn, prefix="")
            feed_dict = dict(
                (tnsr, spimg_dict[tfx.op_name(tnsr, issn.graph)]) for tnsr in feeds)
            img_out = issn.run(fetches[0], feed_dict=feed_dict)
        return img_out

    def check_image_round_trip(img_arr):
        spimg_dict = imageArrayToStruct(img_arr).asDict()
        spimg_dict['data'] = bytes(spimg_dict['data'])
        img_arr_out = exec_gfn_spimg_decode(
            spimg_dict, imageTypeByOrdinal(spimg_dict['mode']).dtype)
        self.assertTrue(np.all(img_arr_out == img_arr))

    for fp in img_fpaths:
        img = load_img(fp)

        img_arr_byte = img_to_array(img).astype(np.uint8)
        check_image_round_trip(img_arr_byte)

        img_arr_float = img_to_array(img).astype(np.float32)
        check_image_round_trip(img_arr_float)

        img_arr_preproc = iv3.preprocess_input(img_to_array(img))
        check_image_round_trip(img_arr_preproc)
def test_pipeline(self):
    """ Pipeline should provide correct function composition """
    img_fpaths = glob(os.path.join(_getSampleJPEGDir(), '*.jpg'))

    xcpt_model = Xception(weights="imagenet")
    stages = [('spimage', gfac.buildSpImageConverter('BGR', 'float32')),
              ('xception', GraphFunction.fromKeras(xcpt_model))]
    piped_model = GraphFunction.fromList(stages)

    for fpath in img_fpaths:
        target_size = tuple(xcpt_model.input.shape.as_list()[1:-1])
        img = load_img(fpath, target_size=target_size)
        img_arr = np.expand_dims(img_to_array(img), axis=0)
        img_input = xcpt.preprocess_input(img_arr)
        preds_ref = xcpt_model.predict(img_input)

        spimg_input_dict = imageArrayToStruct(img_input).asDict()
        spimg_input_dict['data'] = bytes(spimg_input_dict['data'])

        with IsolatedSession() as issn:
            # Need blank import scope name so that spimg fields match the input names
            feeds, fetches = issn.importGraphFunction(piped_model, prefix="")
            feed_dict = dict(
                (tnsr, spimg_input_dict[tfx.op_name(tnsr, issn.graph)]) for tnsr in feeds)
            preds_tgt = issn.run(fetches[0], feed_dict=feed_dict)
            # Uncomment the line below to see the graph
            # tfx.write_visualization_html(issn.graph,
            #                              NamedTemporaryFile(prefix="gdef", suffix=".html").name)

        self.assertTrue(np.all(preds_tgt == preds_ref))
def testTFwPrediction(self):
    keras.backend.set_image_dim_ordering('tf')
    model = SqueezeNet()
    img = image.load_img('images/cat.jpeg', target_size=(227, 227))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    preds = model.predict(x)
    decoded_preds = decode_predictions(preds)
    #print('Predicted:', decoded_preds)
    self.assertIn(decoded_preds[0][0][1], 'tabby')
    #self.assertAlmostEqual(decode_predictions(preds)[0][0][2], 0.82134342)
def load_image(image_path, grayscale=False, target_size=None):
    pil_image = image.load_img(image_path, grayscale, target_size)
    return image.img_to_array(pil_image)
def read_img(img_path, target_size=None):
    img = image.load_img(img_path, target_size=target_size)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    return np.squeeze(np.array(x, dtype=np.float32))
def imread(self, path):
    if 'http' == path[:4]:
        with contextlib.closing(urllib.urlopen(path)) as req:
            local_url = cStringIO.StringIO(req.read())
            img = image.load_img(local_url, target_size=(self.target_dim, self.target_dim))
    else:
        img = image.load_img(path, target_size=(self.target_dim, self.target_dim))
    img = image.img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = preprocess_input(img)
    return img
def load_img_from_file(img_path, target_size=None, time_out_image_downloading=1):
    image = load_img(img_path)
    if target_size is not None:
        image = image.resize((target_size[1], target_size[0]))
    img_array = img_to_array(image) / 255.0
    img_array = img_array.reshape((1,) + img_array.shape)
    return img_array
def preprocess_image(image_path):
    img = load_img(image_path, target_size=(img_nrows, img_ncols))
    img = img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = vgg16.preprocess_input(img)
    return img
def load_mask_labels():
    '''Load both target and style masks.
    A mask image (nr x nc) with m labels/colors will be loaded
    as a 4D boolean tensor: (1, m, nr, nc) for 'th' or (1, nr, nc, m) for 'tf'
    '''
    target_mask_img = load_img(target_mask_path,
                               target_size=(img_nrows, img_ncols))
    target_mask_img = img_to_array(target_mask_img)
    style_mask_img = load_img(style_mask_path,
                              target_size=(img_nrows, img_ncols))
    style_mask_img = img_to_array(style_mask_img)
    if K.image_dim_ordering() == 'th':
        mask_vecs = np.vstack([style_mask_img.reshape((3, -1)).T,
                               target_mask_img.reshape((3, -1)).T])
    else:
        mask_vecs = np.vstack([style_mask_img.reshape((-1, 3)),
                               target_mask_img.reshape((-1, 3))])

    labels = kmeans(mask_vecs, nb_labels)
    style_mask_label = labels[:img_nrows * img_ncols].reshape((img_nrows, img_ncols))
    target_mask_label = labels[img_nrows * img_ncols:].reshape((img_nrows, img_ncols))

    stack_axis = 0 if K.image_dim_ordering() == 'th' else -1
    style_mask = np.stack([style_mask_label == r for r in range(nb_labels)],
                          axis=stack_axis)
    target_mask = np.stack([target_mask_label == r for r in range(nb_labels)],
                           axis=stack_axis)

    return (np.expand_dims(style_mask, axis=0),
            np.expand_dims(target_mask, axis=0))


# Create tensor variables for images