The following code examples, extracted from open-source Python projects, illustrate how to use tensorflow.uint16().
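Before the project examples, here is a minimal sketch of the pattern that dominates them: decoding a 16-bit PNG with dtype=tf.uint16 so that depth maps or microscopy frames keep their full range (decoding with the default uint8 reduces them to 8 bits). It uses the same TF 1.x API as the examples below; the file name depth16.png is a placeholder.

import tensorflow as tf

# 'depth16.png' is a placeholder path to any 16-bit grayscale PNG.
png_bytes = tf.read_file('depth16.png')
# dtype=tf.uint16 keeps the file's full 0..65535 value range.
depth_u16 = tf.image.decode_png(png_bytes, channels=1, dtype=tf.uint16)
# Networks usually want floats, so cast before use.
depth_f32 = tf.cast(depth_u16, tf.float32)

with tf.Session() as sess:
    print(sess.run(tf.reduce_max(depth_f32)))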
def _convert_string_dtype(dtype):
    if dtype == 'float16':
        return tf.float16
    elif dtype == 'float32':
        return tf.float32
    elif dtype == 'float64':
        return tf.float64
    elif dtype == 'int16':
        return tf.int16
    elif dtype == 'int32':
        return tf.int32
    elif dtype == 'int64':
        return tf.int64
    elif dtype == 'uint8':
        return tf.uint8
    elif dtype == 'uint16':
        return tf.uint16
    else:
        raise ValueError('Unsupported dtype:', dtype)
def _get_image(self):
    _, records = self.reader.read(self.input_queue)
    file_names = tf.decode_csv(records,
                               [tf.constant([], tf.string), tf.constant([], tf.string)],
                               field_delim=None, name=None)
    im_raw = tf.read_file(self.base_folder + file_names[0])
    seg_raw = tf.read_file(self.base_folder + file_names[1])
    image = tf.reshape(
        tf.cast(tf.image.decode_png(im_raw, channels=1, dtype=tf.uint16), tf.float32),
        self.image_size, name='input_image')
    seg = tf.reshape(
        tf.cast(tf.image.decode_png(seg_raw, channels=1, dtype=tf.uint8), tf.float32),
        self.image_size, name='input_seg')
    return image, seg, file_names[0]
def _get_image(self):
    im_filename = tf.sparse_tensor_to_dense(
        tf.string_split(tf.expand_dims(self.raw_queue.dequeue(), 0), ':'), '')
    im_filename.set_shape([1, 2])
    im_raw = tf.read_file(self.base_folder + im_filename[0][0])
    seg_raw = tf.read_file(self.base_folder + im_filename[0][1])
    image = tf.reshape(
        tf.cast(tf.image.decode_png(im_raw, channels=1, dtype=tf.uint16), tf.float32),
        self.image_size, name='input_image')
    seg = tf.reshape(
        tf.cast(tf.image.decode_png(seg_raw, channels=1, dtype=tf.uint8), tf.float32),
        self.image_size, name='input_seg')
    if self.partial_frame:
        # center crop covering a partial_frame fraction of the image
        crop_y_start = int(((1 - self.partial_frame) * self.image_size[0]) / 2)
        crop_y_end = int(((1 + self.partial_frame) * self.image_size[0]) / 2)
        crop_x_start = int(((1 - self.partial_frame) * self.image_size[1]) / 2)
        crop_x_end = int(((1 + self.partial_frame) * self.image_size[1]) / 2)
        # tf.slice takes sizes, not end coordinates, as its third argument
        image = tf.slice(image, [crop_y_start, crop_x_start, 0],
                         [crop_y_end - crop_y_start, crop_x_end - crop_x_start, -1])
        seg = tf.slice(seg, [crop_y_start, crop_x_start, 0],
                       [crop_y_end - crop_y_start, crop_x_end - crop_x_start, -1])
    return image, seg, im_filename[0][0], im_filename[0][1]
def _get_image_sequence(self):
    filenames = self.raw_queue
    im_list = []
    seg_list = []
    for i in range(0, len(filenames), 2):
        im_filename, seg_filename = filenames[i], filenames[i + 1]
        im_raw = tf.read_file(self.base_folder + im_filename)
        seg_raw = tf.read_file(self.base_folder + seg_filename)
        image_size = self.image_size + (1,)
        image = tf.reshape(
            tf.cast(tf.image.decode_png(im_raw, channels=1, dtype=tf.uint16), tf.float32),
            image_size)
        seg = tf.reshape(
            tf.cast(tf.image.decode_png(seg_raw, channels=1, dtype=tf.uint8), tf.float32),
            image_size)
        if self.partial_frame:
            crop_y_start = int(((1 - self.partial_frame) * image_size[0]) / 2)
            crop_y_end = int(((1 + self.partial_frame) * image_size[0]) / 2)
            crop_x_start = int(((1 - self.partial_frame) * image_size[1]) / 2)
            crop_x_end = int(((1 + self.partial_frame) * image_size[1]) / 2)
            # tf.slice takes sizes, not end coordinates, as its third argument
            image = tf.slice(image, [crop_y_start, crop_x_start, 0],
                             [crop_y_end - crop_y_start, crop_x_end - crop_x_start, -1])
            seg = tf.slice(seg, [crop_y_start, crop_x_start, 0],
                           [crop_y_end - crop_y_start, crop_x_end - crop_x_start, -1])
        im_list.append(image)
        seg_list.append(seg)
    return im_list, seg_list, filenames
def summANDsave(self, training=True):
    with tf.name_scope('saversANDsummaries'):
        if training:
            # savers
            self.saver_for_train = tf.train.Saver(
                keep_checkpoint_every_n_hours=2, max_to_keep=1)  # will save all the tf graph vars!!!
            self.saver_for_play = tf.train.Saver(
                tf.trainable_variables(),
                keep_checkpoint_every_n_hours=2, max_to_keep=2)  # used after training
            self.train_writer = tf.summary.FileWriter(self.trainSummaryDir)
            self.latest_checkpoint = tf.train.latest_checkpoint(self.trainDir)
            # summaries: loss
            self.lossTotalSummaryHolder = tf.placeholder(dtype=tf.float16)
            self.lossTotalSummary = tf.summary.scalar('total Loss per episode',
                                                      self.lossTotalSummaryHolder)
            self.lossAvgSummaryHolder = tf.placeholder(dtype=tf.float16)
            self.lossAvgSummary = tf.summary.scalar('Avg.Loss per episode',
                                                    self.lossAvgSummaryHolder)
            self.episodeUpdatesHolder = tf.placeholder(dtype=tf.uint16)
            self.episodeUpdates = tf.summary.scalar('Episode updates',
                                                    self.episodeUpdatesHolder)
        else:
            self.latest_checkpoint = tf.train.latest_checkpoint(self.playDir)
            # reward
            self.rewardTotalSummaryHolder = tf.placeholder(dtype=tf.float16)
            self.rewardTotalSummary = tf.summary.scalar('total Reward per episode',
                                                        self.rewardTotalSummaryHolder)
            self.rewardAvgSummaryHolder = tf.placeholder(dtype=tf.float16)
            self.rewardAvgSummary = tf.summary.scalar('Avg.Reward per episode',
                                                      self.rewardAvgSummaryHolder)
            self.episodeDurSummaryHolder = tf.placeholder(dtype=tf.float16)
            self.episodeDurSummary = tf.summary.scalar('Episode duration',
                                                       self.episodeDurSummaryHolder)
            # writer
            self.play_writer = tf.summary.FileWriter(self.playSummaryDir)
        # merger
        self.summary_merger = tf.summary.merge_all()
def test_input_uint16(self):
    self._assert_dtype(
        np.uint16, tf.uint16, np.matrix([[1, 2], [3, 4]], dtype=np.uint16))
def train_batch_inputs(dataset_csv_file_path, batch_size):
    with tf.name_scope('batch_processing'):
        if not os.path.isfile(dataset_csv_file_path):
            raise ValueError('No data files found for this dataset')
        filename_queue = tf.train.string_input_producer([dataset_csv_file_path], shuffle=True)
        reader = tf.TextLineReader()
        _, serialized_example = reader.read(filename_queue)
        filename, depth_filename = tf.decode_csv(serialized_example, [["path"], ["annotation"]])
        # input
        png = tf.read_file(filename)
        image = tf.image.decode_png(png, channels=3)
        image = tf.cast(image, tf.float32)
        # target: depth maps are stored as 16-bit PNGs
        depth_png = tf.read_file(depth_filename)
        depth = tf.image.decode_png(depth_png, dtype=tf.uint16, channels=1)
        depth = tf.cast(depth, dtype=tf.int16)
        # resize
        image = tf.image.resize_images(image, (IMAGE_HEIGHT, IMAGE_WIDTH))
        depth = tf.image.resize_images(depth, (TARGET_HEIGHT, TARGET_WIDTH))
        invalid_depth = tf.sign(depth)
        # generate batch
        images, depths, invalid_depths = tf.train.batch(
            [image, depth, invalid_depth],
            batch_size=batch_size,
            num_threads=4,
            capacity=50 + 3 * batch_size
        )
        return images, depths, invalid_depths
def eval_batch_inputs(dataset_csv_file_path, batch_size):
    with tf.name_scope('eval_batch_processing'):
        if not os.path.isfile(dataset_csv_file_path):
            raise ValueError('No data files found for this dataset')
        filename_queue = tf.train.string_input_producer([dataset_csv_file_path], shuffle=True)
        reader = tf.TextLineReader()
        _, serialized_example = reader.read(filename_queue)
        filename, depth_filename = tf.decode_csv(serialized_example, [["path"], ["annotation"]])
        # input
        png = tf.read_file(filename)
        image = tf.image.decode_png(png, channels=3)
        image = tf.cast(image, tf.float32)
        # target
        depth_png = tf.read_file(depth_filename)
        depth = tf.image.decode_png(depth_png, dtype=tf.uint16, channels=1)
        depth = tf.cast(depth, dtype=tf.int16)
        # resize
        image = tf.image.resize_images(image, (IMAGE_HEIGHT, IMAGE_WIDTH))
        depth = tf.image.resize_images(depth, (TARGET_HEIGHT, TARGET_WIDTH))
        invalid_depth = tf.sign(depth)
        # generate batch
        images, depths, invalid_depths = tf.train.batch(
            [image, depth, invalid_depth],
            batch_size=batch_size,
            num_threads=4,
            capacity=50 + 3 * batch_size
        )
        return images, depths, invalid_depths
def _get_image(self):
    _, records = self.reader.read(self.input_queue)
    file_names = tf.decode_csv(records,
                               [tf.constant([], tf.string), tf.constant([], tf.string)],
                               field_delim=None, name=None)
    im_raw = tf.read_file(self.base_folder + file_names[0])
    seg_raw = tf.read_file(self.base_folder + file_names[1])
    image = tf.reshape(
        tf.cast(tf.image.decode_png(im_raw, channels=1, dtype=tf.uint16), tf.float32),
        self.image_size, name='input_image')
    seg = tf.reshape(
        tf.cast(tf.image.decode_png(seg_raw, channels=1, dtype=tf.uint8), tf.float32),
        self.image_size, name='input_seg')
    return image, seg, file_names[0]
def _get_image(self):
    filename = tf.sparse_tensor_to_dense(
        tf.string_split(tf.expand_dims(self.raw_queue.dequeue(), 0), ':'), '')
    filename.set_shape([1, 2])
    # seg_filename = self.seg_queue.dequeue()
    im_raw = tf.read_file(self.base_folder + filename[0][0])
    seg_raw = tf.read_file(self.base_folder + filename[0][1])
    image = tf.reshape(
        tf.cast(tf.image.decode_png(im_raw, channels=1, dtype=tf.uint16), tf.float32),
        self.image_size, name='input_image')
    seg = tf.reshape(
        tf.cast(tf.image.decode_png(seg_raw, channels=1, dtype=tf.uint8), tf.float32),
        self.image_size, name='input_seg')
    return image, seg, filename[0][0], filename[0][1]
def _get_image(self):
    filename = self.raw_queue.dequeue()
    im_raw = tf.read_file(self.base_folder + filename)
    image_size = self.image_size + (1,)
    image = tf.reshape(
        tf.cast(tf.image.decode_png(im_raw, channels=1, dtype=tf.uint16), tf.float32),
        image_size)
    return image, filename
def _get_image(self):
    image_size = self.image_size + (1,)
    filename_fw = self.raw_queue_fw.dequeue()
    im_raw = tf.read_file(self.base_folder + filename_fw)
    image_fw = tf.reshape(
        tf.cast(tf.image.decode_png(im_raw, channels=1, dtype=tf.uint16), tf.float32),
        image_size)
    filename_bw = self.raw_queue_bw.dequeue()
    im_raw = tf.read_file(self.base_folder + filename_bw)
    image_bw = tf.reshape(
        tf.cast(tf.image.decode_png(im_raw, channels=1, dtype=tf.uint16), tf.float32),
        image_size)
    return image_fw, filename_fw, image_bw, filename_bw
def tf_read_raw(image_path):
    image = tf.image.decode_png(tf.read_file(image_path), dtype=tf.uint16)
    return tf.expand_dims(image, 0)
def conv2SaveSparse(chkPt, outDir):
    # convert weights to sparse format
    with tf.Session() as sess:
        saver = tf.train.import_meta_graph(chkPt + ".meta")
        saver.restore(sess, "./" + chkPt)
        lay_name = [v.name for v in tf.trainable_variables() if v.name.endswith("_w:0")]
        for v in lay_name:
            print(v)
            curLay = [a for a in tf.trainable_variables() if a.name == v]
            wt = curLay[0].eval()
            print("np:", np.where(wt != 0)[0].shape)
            ind = tf.where(tf.not_equal(wt, 0))
            sparse = tf.SparseTensor(ind, tf.gather_nd(wt, ind), curLay[0].get_shape())
            tmp = sess.run(sparse)
            # save the non-zero values and the dense shape
            valName = outDir + v + "spVal.npy"
            print(valName)
            with open(valName, 'wb') as f:
                np.save(f, tmp[1])
            valName = outDir + v + "spMatSize.npy"
            print(valName)
            with open(valName, 'wb') as f:
                np.save(f, tmp[2])
            print("tmp", [tmp[0].shape, tmp[0].dtype, tmp[1].shape, tmp[2]])
            # cast the int64 index matrix down to uint16 before saving
            indMat64 = tmp[0]
            castIndMat64 = tf.cast(indMat64, tf.uint16)
            indMat16 = sess.run(castIndMat64)
            print("intMat16:", [indMat16.shape, indMat16.dtype])
            valName = outDir + v + "spInd16.npy"
            print(valName)
            with open(valName, 'wb') as f:
                np.save(f, indMat16)
def _produce_one_sample(self):
    dirname = os.path.dirname(self.path)
    if not check_dir(dirname):
        raise ValueError("Invalid data path.")
    with open(self.path, 'r') as fid:
        flist = [l.strip() for l in fid.readlines()]
    if self.shuffle:
        random.shuffle(flist)

    input_files = [os.path.join(dirname, 'input', f) for f in flist]
    output_files = [os.path.join(dirname, 'output', f) for f in flist]
    self.nsamples = len(input_files)

    input_queue, output_queue = tf.train.slice_input_producer(
        [input_files, output_files], shuffle=self.shuffle,
        seed=0o123, num_epochs=self.num_epochs)

    # choose decode dtype and white level from the files' bit depth
    if '16-bit' in magic.from_file(input_files[0]):
        input_dtype = tf.uint16
        input_wl = 65535.0
    else:
        input_dtype = tf.uint8
        input_wl = 255.0
    if '16-bit' in magic.from_file(output_files[0]):
        output_dtype = tf.uint16
        output_wl = 65535.0
    else:
        output_dtype = tf.uint8
        output_wl = 255.0

    input_file = tf.read_file(input_queue)
    output_file = tf.read_file(output_queue)

    if os.path.splitext(input_files[0])[-1] == '.jpg':
        im_input = tf.image.decode_jpeg(input_file, channels=3)
    else:
        im_input = tf.image.decode_png(input_file, dtype=input_dtype, channels=3)
    if os.path.splitext(output_files[0])[-1] == '.jpg':
        im_output = tf.image.decode_jpeg(output_file, channels=3)
    else:
        im_output = tf.image.decode_png(output_file, dtype=output_dtype, channels=3)

    # normalize input/output
    sample = {}
    with tf.name_scope('normalize_images'):
        im_input = tf.to_float(im_input) / input_wl
        im_output = tf.to_float(im_output) / output_wl

    inout = tf.concat([im_input, im_output], 2)
    fullres, inout = self._augment_data(inout, 6)
    sample['lowres_input'] = inout[:, :, :3]
    sample['lowres_output'] = inout[:, :, 3:]
    sample['image_input'] = fullres[:, :, :3]
    sample['image_output'] = fullres[:, :, 3:]
    return sample
def convert():
    focal_length = 532.740352
    width = 1280
    height = 760
    pad_width = int(focal_length * 2.0)
    pad_height = int(focal_length * 2.0)

    tf_rgb_filenames = tf.placeholder(tf.string, [4])
    tf_depth_filenames = tf.placeholder(tf.string, [4])
    rgbs = [tf_read_png(tf_rgb_filenames[index]) for index in range(4)]
    depths = [tf_read_raw(tf_depth_filenames[index])[:, :, :, 0:1] for index in range(4)]
    rgbs.extend([tf.zeros([1, height, width, 3], tf.float32) for _ in range(2)])
    depths.extend([tf.zeros([1, height, width, 1], tf.uint16) for _ in range(2)])

    cubic_rgbs = [pad_and_crop(rgb, width, height, pad_width, pad_height) for rgb in rgbs]
    cubic_depths = [pad_and_crop(tf.cast(depth, tf.float32), width, height, pad_width, pad_height)
                    for depth in depths]
    cubic_depths = [backproject_cubic_depth(cubic_depths[index], [1, pad_height, pad_width], face_map[index])
                    for index in range(6)]

    tf_equirectangular_rgb = encode_image(cubic_to_equirectangular(cubic_rgbs, [256, 512]), "png")
    tf_equirectangular_depth = cubic_to_equirectangular(cubic_depths, [256, 512])
    tf_preview_depth = encode_image(tf.log(1.0 + tf_equirectangular_depth), "png")
    tf_equirectangular_depth = tf.squeeze(tf_equirectangular_depth[:, :, :, 0])

    session = tf.Session()

    if not os.path.exists(os.path.join(arguments.output_path, "rgb")):
        os.makedirs(os.path.join(arguments.output_path, "rgb"))
    if not os.path.exists(os.path.join(arguments.output_path, "depth")):
        os.makedirs(os.path.join(arguments.output_path, "depth"))
    if not os.path.exists(os.path.join(arguments.output_path, "preview")):
        os.makedirs(os.path.join(arguments.output_path, "preview"))

    for index in range(arguments.frames):
        rgb_filenames = [os.path.join(arguments.input_rgb, face, "{:06}.png".format(index))
                         for face in arguments.faces.split(",")]
        depth_filenames = [os.path.join(arguments.input_depth, face, "{:06}.png".format(index))
                           for face in arguments.faces.split(",")]
        if arguments.preview:
            equirectangular_rgb, equirectangular_depth, preview_depth = session.run(
                [tf_equirectangular_rgb, tf_equirectangular_depth, tf_preview_depth],
                feed_dict={tf_rgb_filenames: rgb_filenames, tf_depth_filenames: depth_filenames})
            write_image(equirectangular_rgb, os.path.join(arguments.output_path, "rgb", "{:06}.png".format(index)))
            write_image(preview_depth, os.path.join(arguments.output_path, "preview", "{:06}.png".format(index)))
            np.save(os.path.join(arguments.output_path, "depth", "{:06}.npy".format(index)), equirectangular_depth)
        else:
            equirectangular_rgb, equirectangular_depth = session.run(
                [tf_equirectangular_rgb, tf_equirectangular_depth],
                feed_dict={tf_rgb_filenames: rgb_filenames, tf_depth_filenames: depth_filenames})
            write_image(equirectangular_rgb, os.path.join(arguments.output_path, "rgb", "{:06}.png".format(index)))
            np.save(os.path.join(arguments.output_path, "depth", "{:06}.npy".format(index)), equirectangular_depth)
def get_data_kitti(datadir, shuffle_all, batchs):
    """Construct input data lists for KITTI 2012 evaluation."""
    sintel_imgs_1 = "image_2_crop/"
    sintel_flows = "flow_occ_crop/"
    with tf.name_scope('Input'):
        # after number 154 image sizes change
        list_0 = sorted(glob.glob(datadir + sintel_imgs_1 + '/*10.png'))
        list_1 = sorted(glob.glob(datadir + sintel_imgs_1 + '/*11.png'))
        flow_list = sorted(glob.glob(datadir + sintel_flows + '/*.png'))
        print(len(list_0), len(list_1), len(flow_list))
        print("Number of input length: " + str(len(list_0)))
        assert len(list_0) == len(list_1) == len(flow_list) != 0, 'Input lengths not correct'

        if shuffle_all:
            p = np.random.permutation(len(list_0))
        else:
            p = np.arange(len(list_0))
        list_0 = [list_0[i] for i in p]
        list_1 = [list_1[i] for i in p]
        flow_list = [flow_list[i] for i in p]

        input_queue = tf.train.slice_input_producer(
            [list_0, list_1, flow_list], shuffle=False)  # shuffled before

        # image reader
        content_0 = tf.read_file(input_queue[0])
        content_1 = tf.read_file(input_queue[1])
        content_flow = tf.read_file(input_queue[2])
        imgs_0 = tf.image.decode_png(content_0, channels=3)
        imgs_1 = tf.image.decode_png(content_1, channels=3)
        imgs_0 = tf.image.convert_image_dtype(imgs_0, dtype=tf.float32)
        imgs_1 = tf.image.convert_image_dtype(imgs_1, dtype=tf.float32)
        # KITTI flow ground truth is stored as 16-bit PNG
        flows = tf.cast(tf.image.decode_png(content_flow, channels=3, dtype=tf.uint16), tf.float32)

        # set shape
        imgs_0.set_shape(FLAGS.img_shape)
        imgs_1.set_shape(FLAGS.img_shape)
        flows.set_shape(FLAGS.img_shape)

        return tf.train.batch([imgs_0, imgs_1, flows],
                              batch_size=batchs
                              # , num_threads=1
                              )