Python utils module: get_image() example source code

The following 5 code examples, extracted from open source Python projects, illustrate how to use utils.get_image().
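
The exact signature of get_image() differs from project to project, as the examples below show. For orientation, here is a minimal sketch of the DCGAN-style variant (center-crop, then resize, then scale to [-1, 1]) that the second and fourth examples appear to call; the implementation is illustrative, assumes the old scipy.misc image routines, and is not taken from any of the repositories:

import numpy as np
import scipy.misc

def get_image(image_path, image_size, is_crop=True, resize_w=64,
              is_grayscale=False):
    # Illustrative sketch only: load, optionally center-crop to
    # image_size x image_size, resize to resize_w, scale to [-1, 1].
    image = scipy.misc.imread(image_path, flatten=is_grayscale).astype(np.float)
    if is_crop:
        h, w = image.shape[:2]
        top = int(round((h - image_size) / 2.))
        left = int(round((w - image_size) / 2.))
        image = image[top:top + image_size, left:left + image_size]
    image = scipy.misc.imresize(image, [resize_w, resize_w])
    return np.array(image) / 127.5 - 1.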

Project: fast-neural-style    Author: coder-james    | Project Source | File Source
def get_style_features(FLAGS):
    """
    For the "style_image", the preprocessing step is:
    1. Resize the shorter side to FLAGS.image_size
    2. Apply central crop
    """
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Graph().as_default(), tf.Session(config=config) as sess:
      network_fn = nets_factory.get_network_fn(
          FLAGS.loss_model,
          num_classes=1,
          is_training=False)

      image_preprocessing_fn = preprocessing_factory.get_preprocessing(
          FLAGS.loss_model,
          is_training=False)

      images = tf.expand_dims(utils.get_image(FLAGS.style_image, FLAGS.image_size, FLAGS.image_size, image_preprocessing_fn), 0)
      _, endpoints_dict = network_fn(images)

      features = []
      for layer in FLAGS.style_layers:
          feature = endpoints_dict[layer]
          features.append(gram(feature))

      init_func = utils._get_init_fn(FLAGS)
      init_func(sess)
      if not os.path.exists('generated'):
          os.makedirs('generated')
      save_file = 'generated/target_style_' + FLAGS.naming + '.jpg'
      with open(save_file, 'wb') as f:
          target_image = unprocess_image(images[0, :])
          value = tf.image.encode_jpeg(tf.cast(target_image, tf.uint8))
          f.write(sess.run(value))
          tf.logging.info('Target style pattern is saved to: %s.' % save_file)
      return sess.run(features)
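
The gram() helper called above is not part of the snippet. In fast-neural-style implementations it conventionally computes the batched Gram matrix of a feature map, normalized by the number of activations per image; a minimal sketch under that assumption:

import tensorflow as tf

def gram(layer):
    # Flatten the spatial dimensions of a [batch, h, w, channels] map,
    # then take channel-by-channel inner products, normalized by the
    # number of activations per image.
    shape = tf.shape(layer)
    num_images = shape[0]
    num_filters = shape[3]
    size = tf.size(layer)
    filters = tf.reshape(layer, tf.stack([num_images, -1, num_filters]))
    return tf.matmul(filters, filters, transpose_a=True) / tf.to_float(size / num_images)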
Project: vae-gan-tensorflow    Author: zhangqianhui    | Project Source | File Source
def _read_by_function(self, filename):
        array = get_image(filename, 108, is_crop=True, resize_w=self.output_size,
                          is_grayscale=False)
        real_images = np.array(array)
        return real_images
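
Since _read_by_function() runs plain NumPy code, it is typically bridged into the TensorFlow 1.x graph with tf.py_func. A hedged sketch of that wiring (the filenames, the reader instance, and the 64-pixel output size are all assumptions, not taken from the project):

import tensorflow as tf

# Illustrative: pull filenames from an input queue and decode each one
# on the Python side via tf.py_func (TF 1.x API).
filename_queue = tf.train.string_input_producer(['a.jpg', 'b.jpg'])  # hypothetical files
filename = filename_queue.dequeue()
real_images = tf.py_func(reader._read_by_function, [filename], tf.float64)
real_images.set_shape([64, 64, 3])  # assuming reader.output_size == 64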
Project: bone-age    Author: radinformatics    | Project Source | File Source
def main():
    parser = get_parser()

    try:
        args = parser.parse_args()
    except:  # argparse raises SystemExit on bad arguments
        sys.exit(0)

    # if environment logging variable not set, make silent
    if not args.debug:
        os.environ['MESSAGELEVEL'] = "CRITICAL"

    # Tell the user what is going to be used, in case it is incorrect
    from logman import bot
    from predict_image import Model
    from utils import get_image, write_json
    print("\n*** Starting Bone Age Prediction ****")

    # Get the gender
    is_male = True
    if args.gender == "F":
        is_male = False

    # If the user has not provided an image, use an example
    image = args.image
    if image is None:
        print("No image selected, will use provided example...")
        from utils import select_example_image
        image = select_example_image(start=0,end=9)
        is_male = True # all examples male

    # Print parameters for user
    bot.logger.debug("is_male: %s", is_male)
    bot.logger.debug("image: %s", image)
    bot.logger.debug("height: %s", args.height)
    bot.logger.debug("width: %s", args.width)

    # Get the array of data (uint8) - H/W should be set to 256
    image_path = image
    image = get_image(image_path=image,
                      warped_height=args.height,
                      warped_width=args.width)

    print("Building model, please wait.")
    model = Model()
    result = model.get_result(image=image,
                              image_path=image_path,
                              is_male=is_male)

    print('Predicted Age : %d Months' % result['predicted_age'])
    print('Weighted Prediction : %f Months' % result['predicted_weight'])

    if args.output is not None:
        output = write_json(json_object=result,
                            filename=args.output)
        bot.logger.debug('Result written to %s', args.output)
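
write_json is imported from the project's utils module but not shown here; a minimal sketch of what such a helper usually does (name kept, behavior assumed):

import json

def write_json(json_object, filename):
    # Illustrative helper: serialize a dict to a JSON file and return
    # the filename so callers can log it.
    with open(filename, 'w') as output_file:
        output_file.write(json.dumps(json_object, sort_keys=True, indent=4))
    return filename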
Project: streetview    Author: ydnaandy123    | Project Source | File Source
def main(argv):
    pattern = "/home/ian/imagenet/ILSVRC2012_img_train_t1_t2/n*/*JPEG"
    files = glob(pattern)
    assert len(files) > 0
    assert len(files) > 1000000, len(files)

    dirs = glob("/home/ian/imagenet/ILSVRC2012_img_train_t1_t2/n*")
    assert len(dirs) == 1000, len(dirs)
    dirs = [d.split('/')[-1] for d in dirs]
    dirs = sorted(dirs)
    str_to_int = dict(zip(dirs, range(len(dirs))))


    outfile = '/media/NAS_SHARED/imagenet/imagenet_train_labeled_' + str(IMSIZE) + '.tfrecords'
    writer = tf.python_io.TFRecordWriter(outfile)

    for i, f in enumerate(files):
        print(i)
        image = get_image(f, IMSIZE, is_crop=True, resize_w=IMSIZE)
        image = colorize(image)
        assert image.shape == (IMSIZE, IMSIZE, 3)
        image += 1.
        image *= (255. / 2.)
        image = image.astype('uint8')
        #print image.min(), image.max()
        # from pylearn2.utils.image import save
        # save('foo.png', (image + 1.) / 2.)
        image_raw = image.tostring()
        class_str = f.split('/')[-2]
        label = str_to_int[class_str]
        if i % 1 == 0:  # a modulus of 1 logs every file; raise it to log less often
            print(i, '\t', label)
        example = tf.train.Example(features=tf.train.Features(feature={
            'height': _int64_feature(IMSIZE),
            'width': _int64_feature(IMSIZE),
            'depth': _int64_feature(3),
            'image_raw': _bytes_feature(image_raw),
            'label': _int64_feature(label)
            }))
        writer.write(example.SerializeToString())

    writer.close()
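
The _int64_feature and _bytes_feature helpers are not shown in the snippet; they are the conventional TF 1.x TFRecord wrappers, which look like this:

import tensorflow as tf

def _int64_feature(value):
    # Wrap a Python int as a TFRecord int64 feature.
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))

def _bytes_feature(value):
    # Wrap raw bytes as a TFRecord bytes feature.
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))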
Project: fast-neural-style    Author: coder-james    | Project Source | File Source
def main(_):
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    image = Image.open(FLAGS.image_file)
    image = np.asarray(image)
    height = image.shape[0]
    width = image.shape[1]
    channel = image.shape[2]
    tf.logging.info('Image size: %dx%dx%d' % (width, height, channel))

    with tf.Graph().as_default():
        with tf.Session(config=config).as_default() as sess:
            image_preprocessing_fn = preprocessing_factory.get_preprocessing(
                FLAGS.loss_model,
                is_training=False)
            rawimage = utils.get_image(FLAGS.image_file, 256, 256, image_preprocessing_fn)
            rawimage = tf.expand_dims(rawimage, 0)
            rawimage = tf.to_float(rawimage)
            if FLAGS.model_type == "transform":
                generated = transform_model.net(rawimage, training=False)
            elif FLAGS.model_type == "super":
                generated = sr_model.net(rawimage, scale=FLAGS.image_scale, training=False)
            elif FLAGS.model_type == "alipay":
                generated = al_model.net(rawimage, training=False)
            generated = tf.squeeze(generated, [0])
            saver = tf.train.Saver(tf.global_variables())
            sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])
            FLAGS.model_file = os.path.abspath(FLAGS.model_file)
            saver.restore(sess, FLAGS.model_file)

            start_time = time.time()
            generated = sess.run(generated)
            print(generated.shape)
            end_time = time.time()
            tf.logging.info('Elapsed time: %fs' % (end_time - start_time))

            if FLAGS.same_shape:
                generated = tf.image.resize_images(generated, [height, width])
            generated = tf.cast(generated, tf.uint8)
            generated_file = 'generated/aares_%s.jpg' % FLAGS.model_type
            if not os.path.exists('generated'):
                os.makedirs('generated')
            with open(generated_file, 'wb') as img:
                img.write(sess.run(tf.image.encode_jpeg(generated)))
            tf.logging.info('Generated image size: %s' % generated.get_shape())
            tf.logging.info('Done. Please check %s.' % generated_file)
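
As a quick sanity check after the script finishes, the written JPEG can be reopened to confirm it decoded correctly (filename shown for the hypothetical case model_type == "transform"):

from PIL import Image

# Hypothetical check: open the generated file and print its dimensions.
generated = Image.open('generated/aares_transform.jpg')
print(generated.size)  # (width, height)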