The following code examples, extracted from open-source Python projects, illustrate how to use tensorflow.python.platform.gfile.IsDirectory(). The snippets assume the TF 1.x API with import os, import tensorflow as tf, and from tensorflow.python.platform import gfile; identifiers such as Flags, IMAGE_PIXELS, and saver come from the surrounding projects.
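Before the project examples, a minimal self-contained sketch of the call itself (the /tmp paths are placeholders):

import tensorflow as tf  # TF 1.x
from tensorflow.python.platform import gfile

# Unlike os.path.isdir, gfile.IsDirectory understands any filesystem
# TensorFlow supports (e.g. gs:// paths), and it returns False rather
# than raising for paths that do not exist.
print(gfile.IsDirectory("/tmp"))          # True for an existing directory
print(gfile.IsDirectory("/tmp/missing"))  # False for files or missing paths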
def read_decode_tfrecords(records_path, num_epochs=1020, batch_size=Flags.batch_size, num_threads=2):
    if gfile.IsDirectory(records_path):
        records_path = [os.path.join(records_path, i) for i in os.listdir(records_path)]
    else:
        records_path = [records_path]
    records_path_queue = tf.train.string_input_producer(records_path, seed=123, num_epochs=num_epochs,
                                                        name="string_input_producer")
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(records_path_queue, name="serialized_example")
    features = tf.parse_single_example(serialized=serialized_example,
                                       features={"img_raw": tf.FixedLenFeature([], tf.string),
                                                 "label": tf.FixedLenFeature([], tf.int64),
                                                 "height": tf.FixedLenFeature([], tf.int64),
                                                 "width": tf.FixedLenFeature([], tf.int64),
                                                 "depth": tf.FixedLenFeature([], tf.int64)},
                                       name="parse_single_example")
    image = tf.decode_raw(features["img_raw"], tf.uint8, name="decode_raw")
    # height and width must be Python-level constants defined elsewhere in the
    # module; the parsed "height"/"width" features are tensors and cannot be
    # used in set_shape.
    image.set_shape([height * width * 3])
    image = tf.cast(image, tf.float32) * (1.0 / 255) - 0.5
    label = tf.cast(features["label"], tf.int32)
    images, labels = tf.train.shuffle_batch([image, label], batch_size=batch_size, num_threads=num_threads,
                                            name="shuffle_batch", capacity=1020, min_after_dequeue=64)
    print("images' shape is :", str(images.shape))
    return images, labels
def read_decode_tfrecords(records_path, num_epochs=1, batch_size=Flags.batch_size, num_threads=1):
    if gfile.IsDirectory(records_path):
        records_path = [os.path.join(records_path, i) for i in os.listdir(records_path)]
    else:
        records_path = [records_path]
    # num_epochs=None makes the queue cycle indefinitely, ignoring the
    # num_epochs argument of this function.
    records_path_queue = tf.train.string_input_producer(records_path, seed=123, num_epochs=None,
                                                        name="string_input_producer")
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(records_path_queue, name="serialized_example")
    features = tf.parse_single_example(serialized=serialized_example,
                                       features={"img_raw": tf.FixedLenFeature([], tf.string),
                                                 "label": tf.FixedLenFeature([], tf.int64),
                                                 "height": tf.FixedLenFeature([], tf.int64),
                                                 "width": tf.FixedLenFeature([], tf.int64),
                                                 "depth": tf.FixedLenFeature([], tf.int64)},
                                       name="parse_single_example")
    image = tf.decode_raw(features["img_raw"], tf.uint8, name="decode_raw")
    image.set_shape([IMAGE_PIXELS])
    image = tf.cast(image, tf.float32) * (1.0 / 255) - 0.5
    label = tf.cast(features["label"], tf.int32)
    images, labels = tf.train.shuffle_batch([image, label], batch_size=batch_size, num_threads=num_threads,
                                            name="shuffle_batch", capacity=1020, min_after_dequeue=50)
    return images, labels
def read_decode_tfrecords(records_path, num_epochs=1020, batch_size=Flags.batch_size, num_threads=2):
    if gfile.IsDirectory(records_path):
        records_path = [os.path.join(records_path, i) for i in os.listdir(records_path)]
    else:
        records_path = [records_path]
    records_path_queue = tf.train.string_input_producer(records_path, seed=123,
                                                        # num_epochs=num_epochs,
                                                        name="string_input_producer")
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(records_path_queue, name="serialized_example")
    features = tf.parse_single_example(serialized=serialized_example,
                                       features={"img_raw": tf.FixedLenFeature([], tf.string),
                                                 "label": tf.FixedLenFeature([], tf.int64),
                                                 "height": tf.FixedLenFeature([], tf.int64),
                                                 "width": tf.FixedLenFeature([], tf.int64),
                                                 "depth": tf.FixedLenFeature([], tf.int64)},
                                       name="parse_single_example")
    image = tf.decode_raw(features["img_raw"], tf.uint8, name="decode_raw")
    image.set_shape([IMAGE_PIXELS])
    image = tf.cast(image, tf.float32) * (1.0 / 255) - 0.5
    label = tf.cast(features["label"], tf.int32)
    # This variant skips batching and returns single examples.
    # images, labels = tf.train.shuffle_batch([image, label], batch_size=batch_size, num_threads=num_threads,
    #                                         name="shuffle_batch", capacity=1020, min_after_dequeue=64)
    return image, label
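All three variants build a TF 1.x queue-based input pipeline, so the returned tensors only produce values once the queue runners are started inside a session. A minimal consumption sketch, assuming the first (batched) variant above and a placeholder records directory:

images, labels = read_decode_tfrecords("/data/tfrecords", batch_size=32)

with tf.Session() as sess:
    # Local-variable init covers the epoch counter that
    # string_input_producer creates when num_epochs is set.
    sess.run([tf.global_variables_initializer(),
              tf.local_variables_initializer()])
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        img_batch, label_batch = sess.run([images, labels])
    finally:
        coord.request_stop()
        coord.join(threads)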
def _get_checkpoint_filename(filepattern):
    """Returns checkpoint filename given directory or specific filepattern."""
    if gfile.IsDirectory(filepattern):
        return saver.latest_checkpoint(filepattern)
    return filepattern
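Here IsDirectory decides whether the argument is a training directory, to be resolved to its newest checkpoint, or an already-explicit checkpoint prefix; saver refers to the tensorflow.python.training.saver module. A usage sketch with placeholder paths:

from tensorflow.python.platform import gfile
from tensorflow.python.training import saver

# A directory is resolved through its "checkpoint" state file,
# e.g. to "/tmp/train_dir/model.ckpt-1000" (or None if no checkpoint exists).
print(_get_checkpoint_filename("/tmp/train_dir"))
# An explicit checkpoint prefix is returned unchanged.
print(_get_checkpoint_filename("/tmp/train_dir/model.ckpt-1000"))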
def _copy_dir(dir_in, dir_out):
    gfile.MakeDirs(dir_out)
    for name in gfile.ListDirectory(dir_in):
        name_in = os.path.join(dir_in, name)
        name_out = os.path.join(dir_out, name)
        if gfile.IsDirectory(name_in):
            gfile.MakeDirs(name_out)
            _copy_dir(name_in, name_out)
        else:
            gfile.Copy(name_in, name_out, overwrite=True)
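Because every filesystem operation goes through gfile, this recursive copy works across any filesystem TensorFlow supports, not just local disk. A one-line usage sketch with hypothetical paths:

# Copy a local experiment directory to Cloud Storage (paths are hypothetical).
_copy_dir("/tmp/experiment", "gs://my-bucket/experiment")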
def _has_new_checkpoint(path, last):
    new_files = {}
    for f in gfile.ListDirectory(path):
        # ListDirectory yields bare entry names, so join with the parent
        # directory before the IsDirectory check.
        if not gfile.IsDirectory(os.path.join(path, f)):
            try:
                global_step = extract_global_step(f)
            except Exception:  # pylint: disable=broad-except
                continue
            if global_step and (not last or global_step > last):
                new_files[global_step] = f
    if not new_files:
        return last, None
    min_global_step = min(new_files)
    return min_global_step, new_files[min_global_step]
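The snippet relies on an extract_global_step helper that is not shown here; a plausible stand-in (an assumption, not the project's actual code) parses the step number out of a checkpoint filename:

import re

def extract_global_step(filename):
    """Hypothetical helper: pulls the step out of e.g. 'model.ckpt-12345.index'."""
    match = re.search(r"ckpt-(\d+)", filename)
    if match is None:
        raise ValueError("no global step in %r" % filename)
    return int(match.group(1))

# Poll a training directory (placeholder path) for the oldest checkpoint
# newer than step 1000; returns (last, None) when nothing new exists.
step, filename = _has_new_checkpoint("/tmp/train_dir", last=1000)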