The following 50 code examples, collected from open-source Python projects, illustrate how to use tensorflow.WholeFileReader().
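Before the examples, here is a minimal sketch of the pattern nearly all of them share (assuming TensorFlow 1.x; the file list, JPEG format, and 64x64 target size below are hypothetical placeholders): a queue of filenames feeds tf.WholeFileReader(), whose raw bytes are decoded, given a fixed shape, and batched by queue runners.

import tensorflow as tf  # assumes TensorFlow 1.x (queue-based input pipeline)

# Hypothetical list of JPEG paths; substitute your own files.
image_paths = ['img_0.jpg', 'img_1.jpg']

# Enqueue filenames, read whole files as raw bytes, decode, and batch.
filename_queue = tf.train.string_input_producer(image_paths, shuffle=True)
reader = tf.WholeFileReader()
key, value = reader.read(filename_queue)          # key: filename, value: file contents
image = tf.image.decode_jpeg(value, channels=3)   # decode raw bytes to an image tensor
image = tf.image.resize_images(image, [64, 64])   # fixed shape so tf.train.batch can stack
batch = tf.train.batch([image], batch_size=4)

with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    images = sess.run(batch)                      # images has shape [4, 64, 64, 3]
    coord.request_stop()
    coord.join(threads)
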
def inputs(image_dir, batch_size, min_queue_examples, input_height, input_width):
    def read_images(image_paths):
        filename_queue = tf.train.string_input_producer(image_paths)
        reader = tf.WholeFileReader()
        key, value = reader.read(filename_queue)
        image = tf.image.decode_image(value)
        image = tf.image.convert_image_dtype(image, dtype=tf.float32)
        image.set_shape([None, None, 3])
        return image

    image_paths = get_image_paths(image_dir)
    images = read_images(image_paths)
    images = tf.image.crop_to_bounding_box(images, 30, 0, 178, 178)
    # images = tf.image.random_flip_left_right(images)
    images = tf.image.resize_images(images, [input_height, input_width])
    total_image_count = len(image_paths)

    input_batch = tf.train.shuffle_batch([images],
                                         batch_size=batch_size,
                                         num_threads=16,
                                         capacity=min_queue_examples + 3 * batch_size,
                                         min_after_dequeue=min_queue_examples)
    return input_batch, total_image_count

def read_whole_features(file_pattern, num_epochs=1):
    '''
    Return `feature`: `dict` whose keys are `sp`, `ap`, `f0`, `en`, `speaker`
    '''
    files = tf.gfile.Glob(file_pattern)
    print('{} files found'.format(len(files)))
    filename_queue = tf.train.string_input_producer(files, num_epochs=num_epochs)
    reader = tf.WholeFileReader()
    key, value = reader.read(filename_queue)
    print("Processing {}".format(key), flush=True)
    value = tf.decode_raw(value, tf.float32)
    value = tf.reshape(value, [-1, FEAT_DIM])
    return {
        'sp': value[:, :SP_DIM],
        'ap': value[:, SP_DIM : 2 * SP_DIM],
        'f0': value[:, SP_DIM * 2],
        'en': value[:, SP_DIM * 2 + 1],
        'speaker': tf.cast(value[:, SP_DIM * 2 + 2], tf.int64),
        'filename': key,
    }

def _read_input(self, filename_queue):
    class DataRecord(object):
        pass

    reader = tf.WholeFileReader()
    key, value = reader.read(filename_queue)
    record = DataRecord()
    decoded_image = tf.image.decode_jpeg(value, channels=3)  # Assumption: color images are read and are to be generated
    # decoded_image_4d = tf.expand_dims(decoded_image, 0)
    # resized_image = tf.image.resize_bilinear(decoded_image_4d, [self.target_image_size, self.target_image_size])
    # record.input_image = tf.squeeze(resized_image, squeeze_dims=[0])
    cropped_image = tf.cast(
        tf.image.crop_to_bounding_box(decoded_image, 55, 35, self.crop_image_size, self.crop_image_size),
        tf.float32)
    decoded_image_4d = tf.expand_dims(cropped_image, 0)
    resized_image = tf.image.resize_bilinear(decoded_image_4d, [self.resized_image_size, self.resized_image_size])
    record.input_image = tf.squeeze(resized_image, squeeze_dims=[0])
    return record

def readFromFile(self, filename_list, batch_size, img_shape, num_threads=4, min_after_dequeue=10000):
    filename_queue = tf.train.string_input_producer(filename_list, shuffle=False)
    reader = tf.WholeFileReader()
    _, serialized_example = reader.read(filename_queue)
    image = tf.image.decode_jpeg(serialized_example, channels=3)
    image.set_shape(img_shape)
    images = tf.train.shuffle_batch(
        [image],
        batch_size=batch_size,
        num_threads=num_threads,
        capacity=min_after_dequeue + (num_threads + 1) * batch_size,
        min_after_dequeue=min_after_dequeue,
    )
    return images

def read_cifar10(filename_queue):
    # Read the images and generate the decode from PNG image
    imageReader = tf.WholeFileReader()
    image_key, image_value = imageReader.read(filename_queue)
    image_decode = tf.image.decode_png(image_value, channels=1)
    image_decode = tf.cast(image_decode, tf.float32)

    # Preprocess data
    image_key = rename_image_filename(image_key)  # rename image filename
    # label = search_label(image_key)
    # label = 1
    # label = random.choice([1, 2, 3, 4, 5, 6, 7])
    label = random.choice([1, 2, 3, 4])

    # CREATE OBJECT
    class Record(object):
        pass
    record = Record()  # Instantiate object

    record.key = image_key
    record.label = tf.cast(label, tf.int32)
    record.image = image_decode

    # with tf.Session() as ppro:
    #     result = ppro.run([record.label])
    #     print(result)
    return record

def _read_input(filename_queue):
    class DataRecord(object):
        pass

    reader = tf.WholeFileReader()
    key, value = reader.read(filename_queue)
    record = DataRecord()
    decoded_image = tf.image.decode_jpeg(value, channels=NUM_OF_CHANNELS)
    decoded_image_4d = tf.expand_dims(decoded_image, 0)
    resized_image = tf.image.resize_bilinear(decoded_image_4d, [IMAGE_SIZE, IMAGE_SIZE])
    record.input_image = tf.squeeze(resized_image, squeeze_dims=[0])
    cropped_image = tf.cast(
        tf.image.crop_to_bounding_box(decoded_image, 55, 35, MODEL_IMAGE_SIZE, MODEL_IMAGE_SIZE),
        tf.float32)
    decoded_image_4d = tf.expand_dims(cropped_image, 0)
    resized_image = tf.image.resize_bilinear(decoded_image_4d, [IMAGE_SIZE, IMAGE_SIZE])
    record.input_image = tf.squeeze(resized_image, squeeze_dims=[0])
    return record

def __init__(self, images_list_path, num_epoch, batch_size):
    # filling the record_list
    input_file = open(images_list_path, 'r')
    self.record_list = []
    for line in input_file:
        line = line.strip()
        self.record_list.append(line)

    filename_queue = tf.train.string_input_producer(self.record_list, num_epochs=num_epoch)
    image_reader = tf.WholeFileReader()
    _, image_file = image_reader.read(filename_queue)
    image = tf.image.decode_jpeg(image_file, 3)

    # preprocess
    hr_image = tf.image.resize_images(image, [32, 32])
    lr_image = tf.image.resize_images(image, [8, 8])
    hr_image = tf.cast(hr_image, tf.float32)
    lr_image = tf.cast(lr_image, tf.float32)

    min_after_dequeue = 1000
    capacity = min_after_dequeue + 400 * batch_size
    self.hr_images, self.lr_images = tf.train.shuffle_batch([hr_image, lr_image],
                                                            batch_size=batch_size,
                                                            capacity=capacity,
                                                            min_after_dequeue=min_after_dequeue)

def image(n, size, path, epochs=2, shuffle=True, crop=True):
    filenames = [join(path, f) for f in listdir(path) if isfile(join(path, f))]
    if not shuffle:
        filenames = sorted(filenames)

    png = filenames[0].lower().endswith('png')  # If first file is a png, assume they all are

    filename_queue = tf.train.string_input_producer(filenames, num_epochs=epochs, shuffle=shuffle)
    reader = tf.WholeFileReader()
    _, img_bytes = reader.read(filename_queue)
    image = tf.image.decode_png(img_bytes, channels=3) if png else tf.image.decode_jpeg(img_bytes, channels=3)

    processed_image = preprocess(image, size, False)
    if not crop:
        return tf.train.batch([processed_image], n, dynamic_pad=True)

    cropped_image = tf.slice(processed_image, [0, 0, 0], [size, size, 3])
    cropped_image.set_shape((size, size, 3))

    images = tf.train.batch([cropped_image], n)
    return images

def build_batch_reader(paths_image, batch_size):
    """
    """
    file_name_queue = tf.train.string_input_producer(paths_image)

    reader_key, reader_val = tf.WholeFileReader().read(file_name_queue)

    # decode a raw input image
    image = tf.image.decode_jpeg(reader_val, channels=3)

    # to float32 and -1.0 ~ +1.0
    image = tf.cast(image, dtype=tf.float32) / 127.5 - 1.0

    # scale up to increase training data
    image = tf.image.resize_images(image, [264, 264])

    # crop to 256 x 256 for the model.
    # also, a batch needs a concrete image size
    image = tf.random_crop(image, size=[256, 256, 3])

    # random horizontal flipping to increase training data
    image = tf.image.random_flip_left_right(image)

    # create batch
    return tf.train.batch(tensors=[image], batch_size=batch_size)

def _read_input(self, filename_queue):
    class DataRecord(object):
        pass

    reader = tf.WholeFileReader()
    key, value = reader.read(filename_queue)
    record = DataRecord()
    decoded_image = tf.image.decode_jpeg(value, channels=3)  # Assumption: color images are read and are to be generated
    # decoded_image_4d = tf.expand_dims(decoded_image, 0)
    # resized_image = tf.image.resize_bilinear(decoded_image_4d, [self.target_image_size, self.target_image_size])
    # record.input_image = tf.squeeze(resized_image, squeeze_dims=[0])
    decoded_image_4d = tf.expand_dims(decoded_image, 0)
    resized_image = tf.image.resize_bilinear(decoded_image_4d, [self.resized_image_size, self.resized_image_size])
    record.input_image = tf.squeeze(resized_image, squeeze_dims=[0])
    return record

def SingleFileReader(filename, shape, rtype='tanh', ext='jpg'):
    n, h, w, c = shape
    if ext == 'jpg' or ext == 'jpeg':
        decoder = tf.image.decode_jpeg
    elif ext == 'png':
        decoder = tf.image.decode_png
    else:
        raise ValueError('Unsupported file type: {:s}.'.format(ext) +
                         ' (only *.png and *.jpg are supported)')

    filename_queue = tf.train.string_input_producer(filename, shuffle=False)
    reader = tf.WholeFileReader()
    key, value = reader.read(filename_queue)
    img = decoder(value, channels=c)
    img = tf.image.crop_to_bounding_box(img, 0, 0, h, w)
    img = tf.to_float(img)
    if rtype == 'tanh':
        img = tf.div(img, 127.5) - 1.

    imgs = tf.train.batch(
        [img],
        batch_size=n,
        capacity=1)
    return imgs, key

def read_single_image(image_path):
    # image = cv2.imread(image_path, cv2.CV_LOAD_IMAGE_UNCHANGED)
    image = Image.open(image_path)
    image = tf.convert_to_tensor(np.asarray(image))
    # image = tf.contrib.keras.preprocessing.image.load_img(image_path)  # [image_path]
    # image_queue = tf.train.string_input_producer(image)
    # reader = tf.WholeFileReader()
    # key, value = reader.read(image_queue)
    # image = tf.image.decode_jpeg(value, channels=3)
    assert image is not None
    image = tf.image.resize_image_with_crop_or_pad(
        image=image,
        target_height=height,
        target_width=width,
    )
    image = tf.cast(image, tf.float32) * (1. / 255) - 0.5
    image = tf.reshape(image, [-1, height, width, 3])
    return image

def image(n, size, path, epochs=2, shuffle=True, crop=True):
    # for macOS
    if exists(join(path, '.DS_Store')):
        remove(join(path, '.DS_Store'))

    filenames = [join(path, f) for f in listdir(path) if isfile(join(path, f))]
    if not shuffle:
        filenames = sorted(filenames)

    png = filenames[0].lower().endswith('png')  # If first file is a png, assume they all are

    filename_queue = tf.train.string_input_producer(filenames, shuffle=shuffle, num_epochs=epochs)
    reader = tf.WholeFileReader()
    _, img_bytes = reader.read(filename_queue)
    image = tf.image.decode_png(img_bytes, channels=3) if png else tf.image.decode_jpeg(img_bytes, channels=3)

    processed_image = preprocess(image, size)
    return tf.train.batch([processed_image], n, dynamic_pad=True)

def input_setup(self):
    '''
    This function sets up the variables for taking image input.

    filenames_A/filenames_B -> list of all training images
    self.image_A/self.image_B -> input image with values ranging over [-1, 1]
    '''
    filenames_A = tf.train.match_filenames_once("./input/horse2zebra/trainA/*.jpg")
    self.queue_length_A = tf.size(filenames_A)
    filenames_B = tf.train.match_filenames_once("./input/horse2zebra/trainB/*.jpg")
    self.queue_length_B = tf.size(filenames_B)

    filename_queue_A = tf.train.string_input_producer(filenames_A)
    filename_queue_B = tf.train.string_input_producer(filenames_B)

    image_reader = tf.WholeFileReader()
    _, image_file_A = image_reader.read(filename_queue_A)
    _, image_file_B = image_reader.read(filename_queue_B)

    self.image_A = tf.subtract(tf.div(tf.image.resize_images(tf.image.decode_jpeg(image_file_A), [256, 256]), 127.5), 1)
    self.image_B = tf.subtract(tf.div(tf.image.resize_images(tf.image.decode_jpeg(image_file_B), [256, 256]), 127.5), 1)

def read_images(self, input_queue):
    reader = tf.WholeFileReader()
    filename, content = reader.read(input_queue)
    image = tf.image.decode_jpeg(content, channels=self.channel)
    image = tf.cast(image, tf.float32)
    image = tf.image.resize_images(image, size=[self.height, self.width])
    return image

def read(filenames):
    file_names = open(filenames, 'rb').read().split('\n')
    random.shuffle(file_names)
    filename_queue = tf.train.string_input_producer(file_names, capacity=1000, num_epochs=100)
    reader = tf.WholeFileReader()
    _, value = reader.read(filename_queue)
    image = tf.image.decode_jpeg(value)
    cropped = tf.random_crop(image, [resolution * 4, resolution * 4, 3])
    random_flipped = tf.image.random_flip_left_right(cropped)
    minibatch = tf.train.batch([random_flipped], batch_size, capacity=300)
    rescaled = tf.image.resize_bicubic(minibatch, [resolution, resolution]) / 127.5 - 1
    return minibatch, rescaled

def read(filenames):
    file_names = open(filenames, 'rb').read().split('\n')
    random.shuffle(file_names)
    filename_queue = tf.train.string_input_producer(file_names, capacity=3000, num_epochs=100)  # shuffled input_producer by default
    reader = tf.WholeFileReader()
    _, value = reader.read(filename_queue)
    image = tf.image.decode_jpeg(value)
    cropped = tf.random_crop(image, [resolution * 4, resolution * 4, 3])
    random_flipped = tf.image.random_flip_left_right(cropped)
    minibatch = tf.cast(tf.train.batch([random_flipped], batch_size, capacity=300), tf.float32)
    rescaled = tf.image.resize_bicubic(minibatch, [resolution, resolution])
    rescaled = rescaled * 2 / 255 - 1
    return minibatch, rescaled

def image_batch(image_paths, batch_size, load_size=286, crop_size=256, channels=3, shuffle=True,
                num_threads=4, min_after_dequeue=100, allow_smaller_final_batch=False):
    """
    for jpg and png files
    """
    # queue and reader
    img_queue = tf.train.string_input_producer(image_paths, shuffle=shuffle)
    reader = tf.WholeFileReader()

    # preprocessing
    _, img = reader.read(img_queue)
    img = tf.image.decode_image(img, channels=3)
    '''
    tf.image.random_flip_left_right should be used before tf.image.resize_images,
    because tf.image.decode_image returns a tensor without a static shape, which makes
    tf.image.resize_images collapse. Maybe it's a bug!
    '''
    img = tf.image.random_flip_left_right(img)
    img = tf.image.resize_images(img, [load_size, load_size])
    img = tf.random_crop(img, [crop_size, crop_size, channels])
    img = tf.cast(img, tf.float32) / 127.5 - 1

    # batch
    if shuffle:
        capacity = min_after_dequeue + (num_threads + 1) * batch_size
        img_batch = tf.train.shuffle_batch([img],
                                           batch_size=batch_size,
                                           capacity=capacity,
                                           min_after_dequeue=min_after_dequeue,
                                           num_threads=num_threads,
                                           allow_smaller_final_batch=allow_smaller_final_batch)
    else:
        img_batch = tf.train.batch([img],
                                   batch_size=batch_size,
                                   allow_smaller_final_batch=allow_smaller_final_batch)

    return img_batch, len(image_paths)

def setup_test_inputs(sess, filenames, image_size=None, capacity_factor=3):
    if image_size is None:
        image_size = FLAGS.sampleSize

    # Read each JPEG file
    reader = tf.WholeFileReader()
    filename_queue = tf.train.string_input_producer(filenames)
    key, value = reader.read(filename_queue)
    channels = 3
    image = tf.image.decode_jpeg(value, channels=channels, name="dataset_image")
    image.set_shape([None, None, channels])

    crop_size = 128
    image = tf.random_crop(image, [crop_size, crop_size, 3])
    image = tf.reshape(image, [1, crop_size, crop_size, 3])
    image = tf.cast(image, tf.float32) / 255.0

    if crop_size != image_size:
        image = tf.image.resize_area(image, [image_size, image_size])

    # The feature is simply a Kx downscaled version
    K = 4
    downsampled = tf.image.resize_area(image, [image_size // K, image_size // K])

    feature = tf.reshape(downsampled, [image_size // K, image_size // K, 3])
    label = tf.reshape(image, [image_size, image_size, 3])

    # Using asynchronous queues
    features, labels = tf.train.batch([feature, label],
                                      batch_size=FLAGS.batch_test_size,
                                      num_threads=4,
                                      capacity=capacity_factor * FLAGS.batch_test_size,
                                      name='labels_and_features')

    tf.train.start_queue_runners(sess=sess)

    return features, labels

def read_input(image_queue):
    # Read the images and generate the decode from PNG image
    imageReader = tf.WholeFileReader()
    image_key, image_value = imageReader.read(image_queue)
    image_decode = tf.image.decode_png(image_value, channels=1)
    image_decode = tf.cast(image_decode, tf.float32)

    # Preprocess data
    image_key = rename_image_filename(image_key)  # rename image filename
    label = search_label(image_key)

    # CREATE OBJECT
    class Record(object):
        pass
    record = Record()  # Instantiate object

    record.key = image_key
    record.label = tf.cast(label, tf.int32)
    record.image = image_decode

    # PROCESSING IMAGES
    # reshaped_image = tf.cast(record.image, tf.float32)
    # height = 245
    # width = 320
    height = 96
    width = 96

    # Image processing for training the network. Note the many random distortions applied to the image.
    # Randomly crop a [height, width] section of the image.
    distorted_image = tf.random_crop(record.image, [height, width, 1])

    # Randomly flip the image horizontally.
    distorted_image = tf.image.random_flip_left_right(distorted_image)

    # Because these operations are not commutative, consider randomizing the order in which they are applied.
    distorted_image = tf.image.random_brightness(distorted_image, max_delta=63)
    distorted_image = tf.image.random_contrast(distorted_image, lower=0.2, upper=1.8)

    # Subtract off the mean and divide by the variance of the pixels.
    float_image = tf.image.per_image_whitening(distorted_image)

    return generate_train_batch(record.label, float_image)

def batch_queue_for_training_normal(data_path):
    num_channel = argument_sr.options.input_channel
    image_height = argument_sr.options.height
    image_width = argument_sr.options.width
    batch_size = argument_sr.options.batch_size
    threads_num = argument_sr.options.num_threads
    min_queue_examples = argument_sr.options.min_after_dequeue

    filename_queue = tf.train.string_input_producer(get_all_file(path=data_path, endFormat=['jpg']))
    file_reader = tf.WholeFileReader()
    _, image_file = file_reader.read(filename_queue)
    patch = tf.image.decode_jpeg(image_file, 3)
    patch = tf.image.convert_image_dtype(patch, dtype=tf.float32)
    # patch = RGB_to_Tcrbr_Y(patch)
    image_HR8 = tf.random_crop(patch, [image_height, image_width, num_channel])
    image_HR4 = tf.image.resize_images(image_HR8, [int(image_height / 2), int(image_width / 2)],
                                       method=tf.image.ResizeMethod.BICUBIC)
    image_HR2 = tf.image.resize_images(image_HR8, [int(image_height / 4), int(image_width / 4)],
                                       method=tf.image.ResizeMethod.BICUBIC)
    image_LR = tf.image.resize_images(image_HR8, [int(image_height / 8), int(image_width / 8)],
                                      method=tf.image.ResizeMethod.BICUBIC)

    low_res_batch, high2_res_batch, high4_res_batch, high8_res_batch = tf.train.shuffle_batch(
        [image_LR, image_HR2, image_HR4, image_HR8],
        batch_size=batch_size,
        num_threads=threads_num,
        capacity=min_queue_examples + 3 * batch_size,
        min_after_dequeue=min_queue_examples)
    return low_res_batch, high2_res_batch, high4_res_batch, high8_res_batch

def batch_queue_for_training_mkdir():
    num_channel = argument_sr.options.input_channel
    image_height = argument_sr.options.height
    image_width = argument_sr.options.width
    batch_size = argument_sr.options.batch_size
    threads_num = argument_sr.options.num_threads

    filename_queue = tf.train.string_input_producer(argument_sr.options.get_file_list())
    file_reader = tf.WholeFileReader()
    _, image_file = file_reader.read(filename_queue)
    patch = tf.image.decode_jpeg(image_file, 3)
    patch = tf.image.convert_image_dtype(patch, dtype=tf.float32)
    patch = RGB_to_Tcrbr_Y(patch)
    image_HR8 = tf.random_crop(patch, [image_height, image_width, num_channel])
    image_HR4 = tf.image.resize_images(image_HR8, [int(image_height / 2), int(image_width / 2)],
                                       method=tf.image.ResizeMethod.BICUBIC)
    image_HR2 = tf.image.resize_images(image_HR8, [int(image_height / 4), int(image_width / 4)],
                                       method=tf.image.ResizeMethod.BICUBIC)
    image_LR = tf.image.resize_images(image_HR8, [int(image_height / 8), int(image_width / 8)],
                                      method=tf.image.ResizeMethod.BICUBIC)

    low_res_batch, high2_res_batch, high4_res_batch, high8_res_batch = tf.train.batch(
        [image_LR, image_HR2, image_HR4, image_HR8],
        batch_size=batch_size,
        num_threads=threads_num,
        capacity=3 * batch_size)
    filename_queue.close()
    return low_res_batch, high2_res_batch, high4_res_batch, high8_res_batch

def _read_raw_images(path, is_directory=True):
    """Reads a directory of images in tensorflow.

    Args:
        path:
        is_directory:

    Returns:
    """
    images = []
    png_files = []
    jpeg_files = []

    reader = tf.WholeFileReader()

    png_files_path = glob.glob(os.path.join(path, '*.[pP][nN][gG]'))
    jpeg_files_path = glob.glob(os.path.join(path, '*.[jJ][pP][eE][gG]'))
    jpg_files_path = glob.glob(os.path.join(path, '*.[jJ][pP][gG]'))

    if is_directory:
        for filename in png_files_path:
            png_files.append(filename)
        for filename in jpeg_files_path:
            jpeg_files.append(filename)
        for filename in jpg_files_path:
            jpeg_files.append(filename)
    else:
        raise ValueError('Currently only batch read from directory supported')

    # Decode if there is a PNG file:
    if len(png_files) > 0:
        png_file_queue = tf.train.string_input_producer(png_files)
        pkey, pvalue = reader.read(png_file_queue)
        p_img = tf.image.decode_png(pvalue)

    if len(jpeg_files) > 0:
        jpeg_file_queue = tf.train.string_input_producer(jpeg_files)
        jkey, jvalue = reader.read(jpeg_file_queue)
        j_img = tf.image.decode_jpeg(jvalue)

    return  # TODO: return normal thing

def read_and_decode_wholefile(filename_queue, imshape, normalize=False, flatten=True):
    """Reads

    Args:
        filename_queue:
        imshape:
        normalize:
        flatten:

    Returns:
    """
    reader = tf.WholeFileReader()
    key, value = reader.read(filename_queue)
    image = tf.image.decode_png(value, channels=3)

    if flatten:
        num_elements = 1
        for i in imshape:
            num_elements = num_elements * i
        # print num_elements
        image = tf.reshape(image, [num_elements])
        image.set_shape(num_elements)
    else:
        image = tf.reshape(image, imshape)
        image.set_shape(imshape)

    if normalize:
        # Convert from [0, 255] -> [-0.5, 0.5] floats.
        image = tf.cast(image, tf.float32)
        image = tf.cast(image, tf.float32) * (1. / 255) - 0.5

    # don't care
    label = 1

    return image, label

def pix2pix_shoes_bags(self):
    shoes_filename_queue = tf.train.string_input_producer(tf.train.match_filenames_once(dirs['pix2pix_shoes']),
                                                          capacity=200)
    bags_filename_queue = tf.train.string_input_producer(tf.train.match_filenames_once(dirs['pix2pix_bags']),
                                                         capacity=200)

    image_reader = tf.WholeFileReader()
    _, img_shoes = image_reader.read(shoes_filename_queue)
    _, img_bags = image_reader.read(bags_filename_queue)

    # decoding jpg images
    img_shoes, img_bags = tf.image.decode_jpeg(img_shoes), tf.image.decode_jpeg(img_bags)

    # image size : 64x64x3
    img_shoes = tf.cast(tf.reshape(img_shoes, shape=[self.input_height, self.input_width, self.input_channel]),
                        dtype=tf.float32) / 255.
    img_bags = tf.cast(tf.reshape(img_bags, shape=[self.input_height, self.input_width, self.input_channel]),
                       dtype=tf.float32) / 255.

    self.batch_shoes = tf.train.shuffle_batch([img_shoes],
                                              batch_size=self.batch_size,
                                              num_threads=self.num_threads,
                                              capacity=1024,
                                              min_after_dequeue=256)
    self.batch_bags = tf.train.shuffle_batch([img_bags],
                                             batch_size=self.batch_size,
                                             num_threads=self.num_threads,
                                             capacity=1024,
                                             min_after_dequeue=256)

def pix2pix_vangogh(self):
    queue_A = tf.train.string_input_producer(tf.train.match_filenames_once(dirs['pix2pix_vangogh-A']),
                                             num_epochs=self.epoch, shuffle=True)
    queue_B = tf.train.string_input_producer(tf.train.match_filenames_once(dirs['pix2pix_vangogh-B']),
                                             num_epochs=self.epoch, shuffle=True)

    image_reader = tf.WholeFileReader()
    _, img_A = image_reader.read(queue_A)
    _, img_B = image_reader.read(queue_B)

    # decoding jpg images
    img_A = tf.image.decode_jpeg(img_A)
    img_B = tf.image.decode_jpeg(img_B)

    # image size : 64x64x3
    self.img_A = tf.cast(tf.reshape(img_A, shape=[None, self.input_height, self.input_width, self.input_channel]),
                         dtype=tf.float32) / 255.
    self.img_B = tf.cast(tf.reshape(img_B, shape=[None, self.input_height, self.input_width, self.input_channel]),
                         dtype=tf.float32) / 255.

    print(self.img_A.shape)
    print(self.img_B.shape)

    # min_queue_examples = self.batch_size
    # self.batch_A = tf.train.shuffle_batch([img_A],
    #                                       batch_size=self.batch_size,
    #                                       num_threads=self.num_threads,
    #                                       capacity=min_queue_examples + 3 * self.batch_size,
    #                                       min_after_dequeue=min_queue_examples)
    # self.batch_B = tf.train.shuffle_batch([img_B],
    #                                       batch_size=self.batch_size,
    #                                       num_threads=self.num_threads,
    #                                       capacity=min_queue_examples + 3 * self.batch_size,
    #                                       min_after_dequeue=min_queue_examples)

def __init__(self, data_path, iterations, batch_size):
    if FLAGS.first_time:
        with open('fnames.txt', 'w') as f:
            records = lambda x: os.path.abspath(data_path + '/' + x)
            self.records = list(map(records, os.listdir(data_path)))
            for record in self.records:
                f.write(record + '\n')
    else:
        with open('fnames.txt', 'r') as f:
            self.records = []
            for line in f:
                self.records.append(line.strip())

    filename_queue = tf.train.string_input_producer(self.records)
    image_reader = tf.WholeFileReader()
    _, image_file = image_reader.read(filename_queue)
    image = tf.image.decode_jpeg(image_file, 3)

    hr_image = tf.image.resize_images(image, [32, 32])  # downsample image
    lr_image = tf.image.resize_images(image, [8, 8])    # REALLY downsample image
    hr_image = tf.cast(hr_image, tf.float32)
    lr_image = tf.cast(lr_image, tf.float32)

    min_after_dequeue = 1000
    capacity = min_after_dequeue + 400 * batch_size

    # batches images of shape [batch_size, 32, 32, 3], [batch_size, 8, 8, 3]
    self.hr_images, self.lr_images = tf.train.shuffle_batch([hr_image, lr_image],
                                                            batch_size=batch_size,
                                                            min_after_dequeue=min_after_dequeue,
                                                            capacity=capacity)

def _image_op_imagenet(filenames, relative_colors):
    filename_queue = tf.train.string_input_producer(filenames, num_epochs=1)
    reader = tf.WholeFileReader()
    _, value = reader.read(filename_queue)
    image = tf.image.decode_jpeg(value, channels=3)
    image = tf.cast(image, tf.float32)
    if relative_colors:
        image = util.absolute_to_relative_colors(image)
    return image

def _preproc_image_batch(self, batch_size, num_threads=1):
    '''
    This function is only used for the queue input pipeline. It reads a filename
    from the filename queue, decodes the image, pushes it through a pre-processing
    function and then uses tf.train.batch to generate batches.

    :param batch_size: int, batch size
    :param num_threads: int, number of input threads (default=1)
    :return: tf.Tensor, batch of pre-processed input images
    '''
    if ("resnet_v2" in self._network_name) and (self._preproc_func_name is None):
        raise ValueError("When using ResNet, please perform the pre-processing "
                         "function manually. See here for details: "
                         "https://github.com/tensorflow/models/tree/master/slim")

    # Read image file from disk and decode JPEG
    reader = tf.WholeFileReader()
    image_filename, image_raw = reader.read(self._filename_queue)
    image = tf.image.decode_jpeg(image_raw, channels=3)
    # Image preprocessing
    preproc_func_name = self._network_name if self._preproc_func_name is None else self._preproc_func_name
    image_preproc_fn = preprocessing_factory.get_preprocessing(preproc_func_name, is_training=False)
    image_preproc = image_preproc_fn(image, self.image_size, self.image_size)
    # Read a batch of preprocessed images from the queue
    image_batch = tf.train.batch(
        [image_preproc, image_filename], batch_size, num_threads=num_threads, allow_smaller_final_batch=True)
    return image_batch

def get_image(filepath, height, width, preprocess_fn, queue=None):
    png = filepath.lower().endswith('png')
    if queue is None:
        img_bytes = tf.read_file(filepath)
    else:
        reader = tf.WholeFileReader()
        _, img_bytes = reader.read(queue)
    image = tf.image.decode_png(img_bytes, channels=3) if png else tf.image.decode_jpeg(img_bytes, channels=3)
    return preprocess_fn(image, height, width)

def load_target_image():
    """
    """
    file_names = tf.train.string_input_producer([FLAGS.target_image_path])

    _, image = tf.WholeFileReader().read(file_names)

    # Decode byte data, no gif please.
    # NOTE: tf.image.decode_image can decode both jpeg and png. However, the
    #       shape (height and width) is unknown.
    image = tf.image.decode_png(image, channels=3)
    image = tf.cast(image, tf.float32)
    image = tf.image.resize_images(image, [FLAGS.image_size, FLAGS.image_size])
    image = tf.reshape(image, [1, FLAGS.image_size, FLAGS.image_size, 3])
    image = image / 127.5 - 1.0

    with tf.Session() as session:
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)

        image = session.run(image)

        coord.request_stop()
        coord.join(threads)

    return tf.constant(image, name='target_image')

def build_dataset_reader():
    """
    """
    paths_png_wildcards = os.path.join(FLAGS.portraits_dir_path, '*.png')

    paths_png = glob.glob(paths_png_wildcards)

    file_name_queue = tf.train.string_input_producer(paths_png)

    reader = tf.WholeFileReader()

    reader_key, reader_val = reader.read(file_name_queue)

    image = tf.image.decode_png(reader_val, channels=3, dtype=tf.uint8)

    # assume the size of the input images is either 128x128x3 or 64x64x3.
    if FLAGS.crop_image:
        image = tf.image.crop_to_bounding_box(
            image,
            FLAGS.crop_image_offset_y,
            FLAGS.crop_image_offset_x,
            FLAGS.crop_image_size_m,
            FLAGS.crop_image_size_m)

        image = tf.random_crop(
            image, size=[FLAGS.crop_image_size_n, FLAGS.crop_image_size_n, 3])

    image = tf.image.resize_images(image, [FLAGS.image_size, FLAGS.image_size])

    image = tf.image.random_flip_left_right(image)

    image = tf.cast(image, dtype=tf.float32) / 127.5 - 1.0

    return tf.train.batch(
        tensors=[image],
        batch_size=FLAGS.batch_size,
        capacity=FLAGS.batch_size)

def readfile(filename):
    try:
        reader = tf.WholeFileReader()
        key, value = reader.read(filename)
        image = tf.image.decode_jpeg(value, channels=3)
        image = tf.image.resize_images(image, 224, 224)
        float_image = tf.div(tf.cast(image, tf.float32), 255)
        return float_image
    except:
        print -1
        return readfile(filename)

def input_pipeline(filenames, batch_size, num_epochs=None, image_size=142, crop_size=256):
    with tf.device('/cpu:0'):
        filenames = tf.train.match_filenames_once(filenames)
        filename_queue = tf.train.string_input_producer(filenames, num_epochs=num_epochs, shuffle=True)
        reader = tf.WholeFileReader()
        filename, value = reader.read(filename_queue)
        image = tf.image.decode_jpeg(value, channels=3)
        processed = tf.image.resize_images(image, [image_size, image_size], tf.image.ResizeMethod.BILINEAR)
        processed = tf.image.random_flip_left_right(processed)
        processed = tf.random_crop(processed, [crop_size, crop_size, 3])
        # CHANGE TO 'CHW' DATA_FORMAT FOR FASTER GPU PROCESSING
        processed = tf.transpose(processed, [2, 0, 1])
        processed = (tf.cast(processed, tf.float32) - 128.0) / 128.0
        images = tf.train.batch(
            [processed],
            batch_size=batch_size,
            num_threads=NUM_THREADS,
            capacity=batch_size * 5)
        return images

def input_pipeline(filenames, batch_size, num_epochs=None, image_size=142, crop_size=256):
    with tf.device('/cpu:0'):
        filenames = tf.train.match_filenames_once(filenames)
        filename_queue = tf.train.string_input_producer(filenames, num_epochs=num_epochs, shuffle=True)
        reader = tf.WholeFileReader()
        filename, value = reader.read(filename_queue)
        image = tf.image.decode_jpeg(value, channels=3)
        processed = tf.image.resize_images(image, [image_size, image_size], tf.image.ResizeMethod.BILINEAR)
        processed = tf.image.random_flip_left_right(processed)
        processed = tf.random_crop(processed, [crop_size, crop_size, 3])
        # CHANGE TO 'CHW' DATA_FORMAT FOR FASTER GPU PROCESSING
        processed = tf.transpose(processed, [2, 0, 1])
        processed = (tf.cast(processed, tf.float32) - 128.0) / 128.0
        images = tf.train.batch(
            [processed],
            batch_size=batch_size,
            num_threads=NUM_THREADS,
            capacity=batch_size * 5)
        return images

def disk_image_batch(image_paths, batch_size, shape, preprocess_fn=None, shuffle=True, num_threads=16,
                     min_after_dequeue=100, allow_smaller_final_batch=False, scope=None):
    """
    This function is suitable for bmp, jpg, png and gif files

    image_paths: string list or 1-D tensor, each of which is an image path
    preprocess_fn: single image preprocessing function
    """
    with tf.name_scope(scope, 'disk_image_batch'):
        data_num = len(image_paths)

        # dequeue a single image path and read the image bytes; enqueue the whole file list
        _, img = tf.WholeFileReader().read(
            tf.train.string_input_producer(image_paths, shuffle=shuffle, capacity=data_num))
        img = tf.image.decode_image(img)

        # preprocessing
        img.set_shape(shape)
        if preprocess_fn is not None:
            img = preprocess_fn(img)

        # batch the data
        if shuffle:
            capacity = min_after_dequeue + (num_threads + 1) * batch_size
            img_batch = tf.train.shuffle_batch([img],
                                               batch_size=batch_size,
                                               capacity=capacity,
                                               min_after_dequeue=min_after_dequeue,
                                               num_threads=num_threads,
                                               allow_smaller_final_batch=allow_smaller_final_batch)
        else:
            img_batch = tf.train.batch([img],
                                       batch_size=batch_size,
                                       allow_smaller_final_batch=allow_smaller_final_batch)

        return img_batch, data_num

def reader(self):
    """Return a reader for a single entry from the data set.

    See io_ops.py for details of the Reader class.

    Returns:
        Reader object that reads the data set.
    """
    return tf.WholeFileReader()

def _file_reader(self, filename_queue):
    # read file from queue
    reader = tf.WholeFileReader()
    _, img_bytes = reader.read(filename_queue)
    # decode it
    image_data = tf.image.decode_jpeg(img_bytes, channels=3)
    # preprocess it and return
    return preprocess(image_data, self.config)

def get_image_batch(pattern, batch_size, image_size=143, crop_size=128, train=True):
    if train:
        random_flip = lambda x: tf.image.random_flip_left_right(x)
        crop = lambda x: tf.random_crop(x, [crop_size, crop_size, 3])
        queue = lambda: tf.train.string_input_producer(tf.train.match_filenames_once(pattern),
                                                       num_epochs=None, shuffle=True)
        batch = lambda f, x: tf.train.shuffle_batch([f, x],
                                                    batch_size=batch_size,
                                                    num_threads=NUM_THREADS,
                                                    capacity=batch_size * 5,
                                                    min_after_dequeue=batch_size * 3)
    else:
        random_flip = lambda x: tf.identity(x)
        crop = lambda x: tf.image.resize_image_with_crop_or_pad(x, crop_size, crop_size)
        queue = lambda: tf.train.string_input_producer(tf.train.match_filenames_once(pattern),
                                                       num_epochs=1, shuffle=False)
        batch = lambda f, x: tf.train.batch([f, x],
                                            batch_size=batch_size,
                                            num_threads=NUM_THREADS,
                                            allow_smaller_final_batch=False)

    def _preprocess(image):
        image = random_flip(image)
        image = crop(image)
        image = tf.transpose(image, [2, 0, 1])  # change to CHW format
        image = (tf.cast(image, tf.float32) - 128.0) / 128.0  # scale into the [-1, 1] range
        return image

    with tf.device('/cpu:0'):
        filename_queue = queue()
        image_reader = tf.WholeFileReader()
        filename, image_file = image_reader.read(filename_queue)
        image = tf.image.decode_jpeg(image_file, 3)
        resized = tf.image.resize_images(image, [image_size, image_size], tf.image.ResizeMethod.BILINEAR)
        preprocessed = _preprocess(resized)
        filenames, images = batch(filename, preprocessed)

    return filenames, images

def read_image(filename_queue, shuffle):
    image_reader = tf.WholeFileReader()
    path, image_file = image_reader.read(filename_queue)

    # Preprocessing
    image = tf.image.decode_jpeg(image_file, 3)
    if shuffle:
        # image = tf.image.random_contrast(image, lower=0.8, upper=1.2)
        if image.get_shape()[0] > IMAGE_SIZE['cropped'][0] and image.get_shape()[1] > IMAGE_SIZE['cropped'][1]:
            image = tf.random_crop(image, IMAGE_SIZE['cropped'])
        # image = tf.image.per_image_whitening(image)
    image = tf.image.resize_images(image, IMAGE_SIZE['resized'])
    image = image * (1. / 255) - 0.5
    return [image, path]

def read_image(filename_queue):
    reader = tf.WholeFileReader()
    key, value = reader.read(filename_queue)
    image = tf.image.decode_png(value)
    return image

def load_image(path):
    """
    """
    file_names = tf.train.string_input_producer([path])

    _, image = tf.WholeFileReader().read(file_names)

    # Decode byte data, no gif please.
    # NOTE: tf.image.decode_image can decode both jpeg and png. However, the
    #       shape (height and width) is unknown.
    image = tf.image.decode_jpeg(image, channels=3)
    image = tf.cast(image, tf.float32)

    shape = tf.shape(image)[:2]

    image = tf.image.resize_images(image, [256, 256])
    image = tf.reshape(image, [1, 256, 256, 3])

    # for VggNet, subtract the mean color of its training data.
    # image = tf.subtract(image, VggNet.mean_color_rgb())

    image = tf.cast(image, dtype=tf.float32) / 127.5 - 1.0

    # R/G/B to B/G/R
    image = tf.reverse(image, [3])

    padding = [FLAGS.padding, FLAGS.padding]

    image = tf.pad(
        tensor=image,
        paddings=[[0, 0], padding, padding, [0, 0]],
        mode='symmetric')

    with tf.Session() as session:
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)

        image, shape = session.run([image, shape])

        coord.request_stop()
        coord.join(threads)

    return image, shape

def load_image(path):
    """
    """
    file_names = tf.train.string_input_producer([path])

    _, image = tf.WholeFileReader().read(file_names)

    # Decode byte data, no gif please.
    # NOTE: tf.image.decode_image can decode both jpeg and png. However, the
    #       shape (height and width) is unknown.
    image = tf.image.decode_jpeg(image, channels=3)
    image = tf.cast(image, tf.float32)

    shape = tf.shape(image)[:2]

    image = tf.image.resize_images(image, [224, 224])
    image = tf.reshape(image, [1, 224, 224, 3])

    # for VggNet, subtract the mean color of its training data.
    image = tf.subtract(image, VggNet.mean_color_rgb())

    # R/G/B to B/G/R
    image = tf.reverse(image, [3])

    with tf.Session() as session:
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)

        image, shape = session.run([image, shape])

        coord.request_stop()
        coord.join(threads)

    return image, shape

def process_data(sess, filenames):
    """
    This script generates the input images (downsampled) and labels (original images).
    """
    images_size = FLAGS.input_image_size
    reader = tf.WholeFileReader()
    filename_queue = tf.train.string_input_producer(filenames)
    _, value = reader.read(filename_queue)
    channels = FLAGS.image_channels
    image = tf.image.decode_jpeg(value, channels=channels, name="dataset_image")

    # add data augmentation here
    image.set_shape([None, None, channels])
    image = tf.reshape(image, [1, images_size, images_size, 3])
    image = tf.cast(image, tf.float32) / 255.0

    K = FLAGS.scale
    downsampled = tf.image.resize_area(image, [images_size // K, images_size // K])
    upsampled = tf.image.resize_area(downsampled, [images_size, images_size])

    feature = tf.reshape(upsampled, [images_size, images_size, 3])
    label = tf.reshape(image, [images_size, images_size, 3])

    features, labels = tf.train.shuffle_batch(
        [feature, label],
        batch_size=FLAGS.batch_size,
        num_threads=4,
        capacity=5000,
        min_after_dequeue=1000,
        name='labels_and_features')

    tf.train.start_queue_runners(sess=sess)
    print 'tag31', features.eval(), labels.get_shape()
    return features, labels

def single_JPEGimage_reader(filename_queue):
    image_reader = tf.WholeFileReader()
    _, image_file = image_reader.read(filename_queue)
    image = tf.to_float(tf.image.decode_jpeg(image_file, channels=3))
    image = tf.image.resize_images(image, [HEIGHT, WIDTH], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
    return image

def single_PNGimage_reader(filename_queue):
    image_reader = tf.WholeFileReader()
    _, image_file = image_reader.read(filename_queue)
    image = tf.to_float(tf.image.decode_png(image_file, channels=1))
    image = tf.image.resize_images(image, [HEIGHT, WIDTH], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)  # pixel distribution ground truth
    return image