Python scipy.ndimage module: imread() example source code

We have extracted the following 50 code examples from open-source Python projects to illustrate how to use scipy.ndimage.imread().
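Before the project snippets, here is a minimal sketch of the call itself; the file name "example.png" is only a placeholder. scipy.ndimage.imread reads an image file into a NumPy array via PIL/Pillow, with the mode and flatten arguments controlling color handling. The function was deprecated in SciPy 1.0 and has been removed from recent SciPy releases, where imageio.imread is the replacement suggested by the deprecation notice.

from scipy import ndimage  # requires an older SciPy release with Pillow installed

# "example.png" is a placeholder, not a file from any of the projects below.
rgb = ndimage.imread("example.png", mode="RGB")     # (height, width, 3) uint8 array
gray = ndimage.imread("example.png", flatten=True)  # 2-D grayscale array

print(rgb.shape, rgb.dtype)
print(gray.shape)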

Project: BilibiliDraw | Author: TotoriKira
def main():
    '''
        Read the image and print the set of distinct colors it contains.
    '''

    im_array = ndimage.imread("rwby.bmp", mode='RGB')

    print(len(im_array), len(im_array[0]))

    color = set()

    for i in im_array:
        for j in i:
            color.add(tuple(j))

    print('{')
    for i in color:
        print("\"{0}\":,".format(i))
    print('}')

    #  for noi,i in enumerate(im_array):
    #      for noj,j in enumerate(i):
    #          print("Row:%d Col:%d  color: %s" %(noi, noj, j))
Project: tf-image-interpreter | Author: ThoughtWorksInc
def _generate_batch(self, meta):
    image = ndimage.imread(meta.image_path)
    height, width, _ = meta.shape
    if height > width:
      scale = self._image_scale_size / width
    else:
      scale = self._image_scale_size / height

    # TODO: the dimensions in Caffe are (batch elem, channel, height, width)
    resized_image = ndimage.zoom(image, (scale, scale, 1))
    bboxes = np.empty((len(meta.objects), 5))
    for i, obj in enumerate(meta.objects):
      bboxes[i][:4] = obj['bbox']
      bboxes[i][4] = obj['class_index']

    return np.expand_dims(resized_image, 0), scale, bboxes
Project: deeplab_v1_tf1.0 | Author: automan000
def read_labeled_image_list(data_dir, data_list):
    """Reads txt file containing paths to images and ground truth masks.

    Args:
      data_dir: path to the directory with images and masks.
      data_list: path to the file with lines of the form '/path/to/image /path/to/mask'.

    Returns:
      Two lists with all file names for images and masks, respectively.
    """
    f = open(data_list, 'r')
    images = []
    masks = []
    shape = []

    for line in f:
        image, mask = line.strip("\n").split(' ')
        images.append(data_dir + image)
        shape.append(ndimage.imread(data_dir + image).shape[:2])
        masks.append(data_dir + mask)
    return images, masks, shape
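
A usage sketch for the function above, with hypothetical paths; each line of the list file pairs an image path with its mask path, as the docstring describes.

# Hypothetical list file contents (one '/path/to/image /path/to/mask' pair per line):
#   /JPEGImages/0001.jpg /SegmentationClass/0001.png
images, masks, shapes = read_labeled_image_list('/data/voc2012', '/data/voc2012/train_list.txt')
print(len(images), shapes[0])  # number of samples and the (height, width) of the first image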
Project: pyoptflow | Author: scivision
def demo(stem):
    flist = getimgfiles(stem)
    ext = flist[0].suffix

    for i in range(len(flist)-1):
        fn1 = f'{stem}.{i}{ext}'
        im1 = imread(fn1,flatten=True).astype(float)  #flatten=True is rgb2gray
 #       Iold = gaussian_filter(Iold,FILTER)

        fn2 = f'{stem}.{i+1}{ext}'
        im2 = imread(fn2,flatten=True).astype(float)
#        Inew = gaussian_filter(Inew,FILTER)

        U,V = HornSchunck(im1, im2, 1., 100)
        compareGraphs(U,V, im2)

    return U,V
Project: pyoptflow | Author: scivision
def demo(stem, kernel=5,Nfilter=7):
    flist = getimgfiles(stem)
    ext = flist[0].suffix
#%% priming read
    im1 = imread(f'{stem}.0{ext}', flatten=True)
    Y,X = im1.shape
#%% evaluate the first frame's POI
    POI = getPOI(X,Y,kernel)
#% get the weights
    W = gaussianWeight(kernel)
#%% loop over all images in directory
    for i in range(1,len(flist)):
        im2 = imread(f'{stem}.{i}{ext}', flatten=True)
        im2 = gaussian_filter(im2, Nfilter)

        V = LucasKanade(im1, im2, POI, W, kernel)

        compareGraphsLK(im1, im2, POI, V)

        im1 = im2
Project: arc-pytorch | Author: sanyam5
def omniglot_folder_to_NDarray(path_im):
    alphbts = os.listdir(path_im)
    ALL_IMGS = []

    for alphbt in alphbts:
        chars = os.listdir(os.path.join(path_im, alphbt))
        for char in chars:
            img_filenames = os.listdir(os.path.join(path_im, alphbt, char))
            char_imgs = []
            for img_fn in img_filenames:
                fn = os.path.join(path_im, alphbt, char, img_fn)
                I = imread(fn)
                I = np.invert(I)
                char_imgs.append(I)
            ALL_IMGS.append(char_imgs)

    return np.array(ALL_IMGS)
Project: One-Shot-Learning-Demo | Author: llSourcell
def load_img_as_points(filename):
    # Load image file and return coordinates of black pixels in the binary image
    #
    # Input
    #  filename : string, absolute path to image
    #
    # Output:
    #  D : [n x 2] rows are coordinates
    #
    I = imread(filename, flatten=True)
    # Convert to boolean array and invert the pixel values
    I = ~np.array(I, dtype=np.bool)
    # Create a new array of all the non-zero element coordinates
    D = np.array(I.nonzero()).T
    return D - D.mean(axis=0)


# Main function
Project: SLAM | Author: sanjeevkumar42
def get_rgbd_file(self, dirname, offset):
        associations = self.seq_dir_map[dirname]['associations']

        if associations[offset, 1].startswith('depth'):
            rgb_filename = os.path.join(dirname, associations[offset, 3])
            depth_filename = os.path.join(dirname, associations[offset, 1])
        else:
            rgb_filename = os.path.join(dirname, associations[offset, 1])
            depth_filename = os.path.join(dirname, associations[offset, 3])

        rgb_img = ndimage.imread(rgb_filename)
        depth_img = ndimage.imread(depth_filename)
        width = height = 224

        # Reshape
        depth_img = np.reshape(depth_img, list(depth_img.shape) + [1])
        depth_img = 255 * depth_img / np.max(depth_img)

        rgbd_img = np.concatenate((rgb_img, depth_img), 2)

        # Resize
        rgbd_img = transform.resize(rgbd_img, [width, height], preserve_range=True)

        return rgb_filename, depth_filename, rgbd_img.astype(np.float32)
Project: SLAM | Author: sanjeevkumar42
def read_rgb_image(filepath):
    rgb_img = ndimage.imread(filepath)
    width = height = 224
    img_width = rgb_img.shape[1]
    img_height = rgb_img.shape[0]

    # scale such that smaller dimension is 256
    if img_width < img_height:
        factor = 256.0 / img_width
    else:
        factor = 256.0 / img_height
    rgb_img = transform.rescale(rgb_img, factor, preserve_range=True)

    # crop randomly
    width_start = np.random.randint(0, rgb_img.shape[1] - width)
    height_start = np.random.randint(0, rgb_img.shape[0] - height)

    rgb_img = rgb_img[height_start:height_start + height, width_start:width_start + width]
    return rgb_img
Project: cnn-bnn | Author: jpdz
def rotate_save(path):
  filepath = os.path.join(os.getcwd(),path)
  images_files = os.listdir(filepath)
  num_img = 0
  for i,image in enumerate(images_files):
    image_file =  os.path.join(filepath,image)
    try:
      img = ndimage.imread(image_file)
      img = img[:,:,0]
      new_im = Image.fromarray(img)  # assumes PIL's Image is imported; the original img.fromarray would fail on a NumPy array
      for j in range(0,360,72):
        im = new_im.rotate(j)
        filename = "%s%05d.png"%(path,num_img)
        im.save(filename)
        num_img+=1
    except Exception as e:
      print('Could not read:', image_file, ':', e, '- it\'s ok, skipping.')
Project: harpreif | Author: harpribot
def load_next_image(self):
        """
        Loads next image from train index for training.
        :return: True if the next image is present, else False
        """
        if len(self.image_list) == self.image_ptr:
            return False
        sys.stderr.write('Loaded Image #' + str(self.image_ptr) + ' ...\n')
        self.image = ndimage.imread(self.image_list[self.image_ptr])
        is_color = self.__check_color()
        if is_color:
            self.image = rgb2gray(self.image)

        assert self.image.shape == (256, 256), 'Image not 256 x 256'
        self.__break_into_jigzaw_pieces()
        self.image_ptr += 1
        self.tries = 1

        return True
Project: harpreif | Author: harpribot
def load_next_image(self):
        """
        Loads next image from train index for training.
        :return: True if the next image is present, else False
        """
        if len(self.image_list) == self.image_ptr:
            return False
        print 'Loaded New Image'
        self.image = ndimage.imread(self.image_list[self.image_ptr])
        self.image_name = self.image_list[self.image_ptr]

        is_color = self.__check_color()
        if is_color:
            self.image = rgb2gray(self.image)

        assert self.image.shape == (256, 256), 'Image not 256 x 256'
        self.image_ptr += 1

        return True
Project: superpixelDepth | Author: slundqui
def getMeanVar(self):
        depths = None
        for f in self.depthFiles:
            depthImg = imread(f).astype(np.float32)/256
            validDepths = depthImg[np.nonzero(depthImg != 0)]
            if depths is None:
                depths = validDepths
            else:
                depths = np.concatenate((depths, validDepths))
        self.mean = np.mean(depths)
        self.std = np.std(depths)
        print "depth mean: ", self.mean
        print "depth std: ", self.std

    #Function to return new image and depth file
    #TODO generate random ranking and randomize images
Project: LSH_Memory | Author: RUSH-LAB
def crawl_directory(directory, augment_with_rotations=False, first_label=0):
  """Crawls the data directory and returns lists of images, labels, and file info."""
  label_idx = first_label
  images = []
  labels = []
  info = []

  # traverse root directory
  for root, _, files in os.walk(directory):
    logging.info('Reading files from %s', root)

    for file_name in files:
      full_file_name = os.path.join(root, file_name)
      img = imread(full_file_name, flatten=True)
      for idx, angle in enumerate([0, 90, 180, 270]):
        if not augment_with_rotations and idx > 0:
          break

        images.append(imrotate(img, angle))
        labels.append(label_idx + idx)
        info.append(full_file_name)

    if len(files) == 20:
      label_idx += 4 if augment_with_rotations else 1
  return images, labels, info
Project: SerialPhotoMerge | Author: simon-r
def read(self, file_name=None):

        if file_name != None:
            self.file_name = file_name
        elif self.file_name != None:
            pass
        else:
            raise Exception("%s: undefined file name" %
                            sys._getframe().f_code.co_name)

        img_rgb = Image(color_depth=8)

        self.raw = ndimage.imread(self.file_name)
        img_rgb.image = np.array(self.raw, dtype=img_rgb.dtype)

        return img_rgb
Project: SerialPhotoMerge | Author: simon-r
def read(self, file_name=None):

        if file_name != None:
            self.file_name = file_name
        elif self.file_name != None:
            pass
        else:
            raise Exception("%s: undefined file name" %
                            sys._getframe().f_code.co_name)

        rgb = Image(color_depth=16)
        raw = None

        with rawpy.imread(self.file_name) as raw:
            self.raw = raw.postprocess(output_bps=16)
            rgb.image = np.array(self.raw, dtype=rgb.dtype)

        return rgb
Project: AVSR-Deep-Speech | Author: pandeydivesh15
def read_data(self, train_split=0.80, dev_split=0.10, test_split=0.10):
        """
        Class function to read images from `self.image_dir` and split them into three groups: train/dev/test.
        """
        assert (train_split + dev_split + test_split == 1.0)

        all_images = glob.glob(self.image_dir + "*.png")
        data = []

        for image_path in all_images:
            image = imread(image_path, flatten=True)
            image = image.reshape(IMAGE_WIDTH*IMAGE_HEIGHT)
            # image = np.multiply(image, 1.0 / 255.0) No scaling here

            data.append(image)

        data = np.array(data)
        data = data.astype(np.uint8)

        total_images = data.shape[0]

        train_limit = int(total_images * train_split)
        dev_limit = train_limit + int(total_images * dev_split)

        self.train = data[:train_limit]
        self.dev = data[train_limit:dev_limit]
        self.test = data[dev_limit:]

        # Only shuffling training data.
        random.shuffle(self.train)

        self.data_dict = {
            'train':    self.train,
            'dev':      self.dev,
            'test':     self.test}
Project: BilibiliDraw | Author: TotoriKira
def main():
    '''
        Read the image and print the set of distinct colors it contains.
    '''

    im_array = ndimage.imread("greytech.png", mode='RGB')

    print(len(im_array), len(im_array[0]))

    color = set()

    for i in im_array:
        for j in i:
            color.add(tuple(j))

    #  tmp = [[0 for i in range(len(im_array[0]))] for j in range(len(im_array))]
    #
    #  for i in range((len(im_array))):
    #      for j in range(len(im_array[0])):
    #          print(str(tuple(im_array[i][j])))
    #          if str(tuple(im_array[i][j]))!= "(255, 255, 255)":
    #              tmp[i][j]=(0,0,0)
    #          else:
    #              tmp[i][j]=im_array[i][j]
    #
    #  misc.imsave("test.bmp", tmp)


    print('{')
    for i in color:
        print("\"{0}\":,".format(i))
    print('}')

    #  for noi,i in enumerate(im_array):
    #      for noj,j in enumerate(i):
    #          print("Row:%d Col:%d  color: %s" %(noi, noj, j))
Project: BilibiliDraw | Author: TotoriKira
def main():
    '''
        Read the image and print the set of distinct colors it contains.
    '''

    im_array = ndimage.imread("ustc.bmp", mode='RGB')

    print(len(im_array), len(im_array[0]))

    color = set()

    for i in im_array:
        for j in i:
            color.add(tuple(j))

    #  tmp = [[0 for i in range(len(im_array[0]))] for j in range(len(im_array))]
    #
    #  for i in range((len(im_array))):
    #      for j in range(len(im_array[0])):
    #          print(str(tuple(im_array[i][j])))
    #          if str(tuple(im_array[i][j]))!= "(255, 255, 255)":
    #              tmp[i][j]=(0,0,0)
    #          else:
    #              tmp[i][j]=im_array[i][j]
    #
    #  misc.imsave("test.bmp", tmp)


    print('{')
    for i in color:
        print("\"{0}\":,".format(i))
    print('}')

    #  for noi,i in enumerate(im_array):
    #      for noj,j in enumerate(i):
    #          print("Row:%d Col:%d  color: %s" %(noi, noj, j))
Project: BilibiliDraw | Author: TotoriKira
def main():
    '''
        Read the image and print the set of distinct colors it contains.
    '''

    im_array = ndimage.imread("greytech.png", mode='RGB')

    print(len(im_array), len(im_array[0]))

    color = set()

    for i in im_array:
        for j in i:
            color.add(tuple(j))

    #  tmp = [[0 for i in range(len(im_array[0]))] for j in range(len(im_array))]
    #
    #  for i in range((len(im_array))):
    #      for j in range(len(im_array[0])):
    #          print(str(tuple(im_array[i][j])))
    #          if str(tuple(im_array[i][j]))!= "(255, 255, 255)":
    #              tmp[i][j]=(0,0,0)
    #          else:
    #              tmp[i][j]=im_array[i][j]
    #
    #  misc.imsave("test.bmp", tmp)


    print('{')
    for i in color:
        print("\"{0}\":,".format(i))
    print('}')

    #  for noi,i in enumerate(im_array):
    #      for noj,j in enumerate(i):
    #          print("Row:%d Col:%d  color: %s" %(noi, noj, j))
Project: BilibiliDraw | Author: TotoriKira
def main():
    '''
        Read the image and print the set of distinct colors it contains.
    '''

    im_array = ndimage.imread("ms.bmp", mode='RGB')

    print(len(im_array), len(im_array[0]))

    color = set()

    for i in im_array:
        for j in i:
            color.add(tuple(j))

    #  tmp = [[0 for i in range(len(im_array[0]))] for j in range(len(im_array))]
    #
    #  for i in range((len(im_array))):
    #      for j in range(len(im_array[0])):
    #          print(str(tuple(im_array[i][j])))
    #          if str(tuple(im_array[i][j]))!= "(255, 255, 255)":
    #              tmp[i][j]=(0,0,0)
    #          else:
    #              tmp[i][j]=im_array[i][j]
    #
    #  misc.imsave("test.bmp", tmp)


    print('{')
    for i in color:
        print("\"{0}\":,".format(i))
    print('}')

    #  for noi,i in enumerate(im_array):
    #      for noj,j in enumerate(i):
    #          print("Row:%d Col:%d  color: %s" %(noi, noj, j))
Project: logodetect | Author: munibasad
def load_logo(data_dir):
    image_files = os.listdir(data_dir)
    dataset = np.ndarray(
        shape=(len(image_files), CNN_IN_HEIGHT, CNN_IN_WIDTH, CNN_IN_CH),
        dtype=np.float32)
    print(data_dir)
    num_images = 0
    for image in image_files:
        image_file = os.path.join(data_dir, image)
        try:
            image_data = (ndimage.imread(image_file).astype(float) -
                          PIXEL_DEPTH / 2) / PIXEL_DEPTH
            if image_data.shape != (CNN_IN_HEIGHT, CNN_IN_WIDTH, CNN_IN_CH):
                raise Exception('Unexpected image shape: %s' %
                                str(image_data.shape))
            dataset[num_images, :, :] = image_data
            num_images = num_images + 1
        except IOError as e:
            print('Could not read:', image_file, ':', e,
                  '-it\'s ok, skipping.')

    dataset = dataset[0:num_images, :, :]
    print('Full dataset tensor:', dataset.shape)
    print('Mean:', np.mean(dataset))
    print('Standard deviation:', np.std(dataset))
    return dataset
Project: images-web-crawler | Author: amineHorseman
def reshape_images(cls, source_folder, target_folder, height=128, width=128,
                       extensions=('.jpg', '.jpeg', '.png')):
        """ copy images and reshape them"""

        # check source_folder and target_folder:
        cls.check_folder_existance(source_folder, throw_error_if_no_folder=True)
        cls.check_folder_existance(target_folder, display_msg=False)
        if source_folder[-1] == "/":
            source_folder = source_folder[:-1]
        if target_folder[-1] == "/":
            target_folder = target_folder[:-1]

        # read images and reshape:
        print("Resizing '", source_folder, "' images...")
        for filename in os.listdir(source_folder):
            if os.path.isdir(source_folder + '/' + filename):
                cls.reshape_images(source_folder + '/' + filename,
                                   target_folder + '/' + filename,
                                   height, width, extensions=extensions)
            else:
                if extensions == '' and os.path.splitext(filename)[1] == '':
                    copy2(source_folder + "/" + filename,
                          target_folder + "/" + filename)
                    image = ndimage.imread(target_folder + "/" + filename, mode="RGB")
                    image_resized = misc.imresize(image, (height, width))
                    misc.imsave(target_folder + "/" + filename, image_resized)
                else:
                    for extension in extensions:
                        if filename.endswith(extension):
                            copy2(source_folder + "/" + filename,
                                  target_folder + "/" + filename)
                            image = ndimage.imread(target_folder + "/" + filename, mode="RGB")
                            image_resized = misc.imresize(image, (height, width))
                            misc.imsave(target_folder + "/" + filename, image_resized)
Project: images-web-crawler | Author: amineHorseman
def crop_images(cls, source_folder, target_folder, height=128, width=128,
                       extensions=('.jpg', '.jpeg', '.png')):
        """ copy images and center crop them"""

        # check source_folder and target_folder:
        cls.check_folder_existance(source_folder, throw_error_if_no_folder=True)
        cls.check_folder_existance(target_folder, display_msg=False)
        if source_folder[-1] == "/":
            source_folder = source_folder[:-1]
        if target_folder[-1] == "/":
            target_folder = target_folder[:-1]

        # read images and crop:
        print("Cropping '", source_folder, "' images...")
        for filename in os.listdir(source_folder):
            if os.path.isdir(source_folder + '/' + filename):
                cls.crop_images(source_folder + '/' + filename,
                                   target_folder + '/' + filename,
                                   height, width, extensions=extensions)
            else:
                if extensions == '' and os.path.splitext(filename)[1] == '':
                    copy2(source_folder + "/" + filename,
                          target_folder + "/" + filename)
                    image = ndimage.imread(target_folder + "/" + filename, mode="RGB")
                    [width_original, height_original, _] = image.shape
                    offset_w = (width_original - width) // 2    # integer offsets so the slice indices stay valid
                    offset_h = (height_original - height) // 2  # center along the second axis as well
                    image_cropped = image[offset_w : width + offset_w, offset_h : height + offset_h, :]
                    misc.imsave(target_folder + "/" + filename, image_cropped)
                else:
                    for extension in extensions:
                        if filename.endswith(extension):
                            copy2(source_folder + "/" + filename,
                                  target_folder + "/" + filename)
                            image = ndimage.imread(target_folder + "/" + filename, mode="RGB")
                            [width_original, height_original, _] = image.shape
                            offset_w = (width_original - width) // 2    # integer offsets so the slice indices stay valid
                            offset_h = (height_original - height) // 2  # center along the second axis as well
                            image_cropped = image[offset_w : width + offset_w, offset_h : height + offset_h, :]
                            misc.imsave(target_folder + "/" + filename, image_cropped)
Project: images-web-crawler | Author: amineHorseman
def convert_to_grayscale(cls, source_folder, target_folder,
                             extensions=('.jpg', '.jpeg', '.png')):
        """ convert images from RGB to Grayscale"""

        # check source_folder and target_folder:
        cls.check_folder_existance(source_folder, throw_error_if_no_folder=True)
        cls.check_folder_existance(target_folder, display_msg=False)
        if source_folder[-1] == "/":
            source_folder = source_folder[:-1]
        if target_folder[-1] == "/":
            target_folder = target_folder[:-1]

        # read images and reshape:
        print("Convert '", source_folder, "' images to grayscale...")
        for filename in os.listdir(source_folder):
            if os.path.isdir(source_folder + '/' + filename):
                cls.convert_to_grayscale(source_folder + '/' + filename,
                                         target_folder + '/' + filename,
                                         extensions=extensions)
            else:
                if extensions == '' and os.path.splitext(filename)[1] == '':
                    copy2(source_folder + "/" + filename,
                          target_folder + "/" + filename)
                    image = ndimage.imread(target_folder + "/" + filename, flatten=True)
                    misc.imsave(target_folder + "/" + filename, image)
                else:
                    for extension in extensions:
                        if filename.endswith(extension):
                            copy2(source_folder + "/" + filename,
                                  target_folder + "/" + filename)
                            image = ndimage.imread(target_folder + "/" + filename, flatten=True)
                            misc.imsave(target_folder + "/" + filename, image)
Project: images-web-crawler | Author: amineHorseman
def convert_format(cls, source_folder, target_folder,
                       extensions=('.jpg', '.jpeg', '.png'), new_extension='.jpg'):
        """ change images from one format to another (eg. change png files to jpeg) """

        # check source_folder and target_folder:
        cls.check_folder_existance(source_folder, throw_error_if_no_folder=True)
        cls.check_folder_existance(target_folder, display_msg=False)
        if source_folder[-1] == "/":
            source_folder = source_folder[:-1]
        if target_folder[-1] == "/":
            target_folder = target_folder[:-1]

        # read images and reshape:
        print("Change format of '", source_folder, "' files...")
        for filename in os.listdir(source_folder):
            if os.path.isdir(source_folder + '/' + filename):
                cls.convert_format(source_folder + '/' + filename,
                                   target_folder + '/' + filename,
                                   extensions=extensions, new_extension=new_extension)
            else:
                if extensions == '' and os.path.splitext(filename)[1] == '':
                    copy2(source_folder + "/" + filename,
                          target_folder + "/" + filename + new_extension)
                    image = ndimage.imread(target_folder + "/" + filename + new_extension)
                    misc.imsave(target_folder + "/" + filename + new_extension, image)
                else:
                    for extension in extensions:
                        if filename.endswith(extension):
                            new_filename = os.path.splitext(filename)[0] + new_extension
                            copy2(source_folder + "/" + filename,
                                  target_folder + "/" + new_filename)
                            image = ndimage.imread(target_folder + "/" + new_filename)
                            misc.imsave(target_folder + "/" + new_filename, image)
Project: images-web-crawler | Author: amineHorseman
def convert_to_array(cls, source_folder, target_folder, create_labels_file=False,
                       flatten=False, extensions=('.jpg', '.jpeg', '.png')):
        """ Read all images in subfolders and convert them to a single array """

        # check source_folder and target_folder:
        cls.check_folder_existance(source_folder, throw_error_if_no_folder=True)
        cls.check_folder_existance(target_folder, display_msg=False)
        if source_folder[-1] == "/":
            source_folder = source_folder[:-1]
        if target_folder[-1] == "/":
            target_folder = target_folder[:-1]

        # read images and concatenate:
        print("Converting '", source_folder, "' images...")
        for filename in os.listdir(source_folder):
            if os.path.isdir(source_folder + '/' + filename):
                cls.convert_to_array(source_folder + '/' + filename, target_folder, 
                    create_labels_file=create_labels_file, extensions=extensions)
            else:
                if extensions == '' and os.path.splitext(filename)[1] == '':
                    image = ndimage.imread(source_folder + "/" + filename, mode="RGB")
                    if (flatten):
                        cls.data.append(image.flatten())
                    else:
                        cls.data.append(image)
                    if create_labels_file:
                        cls.labels.append(source_folder.replace('/', '_'))
                else:
                    for extension in extensions:
                        if filename.endswith(extension):
                            image = ndimage.imread(source_folder + "/" + filename, mode="RGB")
                            if (flatten):
                                cls.data.append(image.flatten())
                            else:
                                cls.data.append(image)
                            if create_labels_file:
                                cls.labels.append(source_folder.replace('/', '_'))
Project: FlowNetPytorch | Author: ClementPinard
def load_flow_from_png(png_path):
    return(imread(png_path)[:,:,0:2].astype(float) - 128)
Project: FlowNetPytorch | Author: ClementPinard
def KITTI_loader(root,path_imgs, path_flo):
    imgs = [os.path.join(root,path) for path in path_imgs]
    flo = os.path.join(root,path_flo)
    return [imread(img) for img in imgs],load_flow_from_png(flo)
Project: FlowNetPytorch | Author: ClementPinard
def default_loader(root, path_imgs, path_flo):
    imgs = [os.path.join(root,path) for path in path_imgs]
    flo = os.path.join(root,path_flo)
    return [imread(img).astype(np.float32) for img in imgs],load_flo(flo)
Project: auckland-ai-meetup-x-triage | Author: a-i-joe
def png_to_array(path, res=(256, 256)):
    img = imread(path, flatten=True)
    if (img.max() <= 0.0):
        raise ValueError("empty image. imgname: " + path)
    return preprocess_image(img, res)
Project: VDSR-Keras | Author: GeorgeSeif
def load_images(directory):
    images = []
    for root, dirnames, filenames in os.walk(directory):
        for filename in filenames:
            if re.search(r"\.(jpg|jpeg|png|bmp|tiff)$", filename):
                filepath = os.path.join(root, filename)
                image = ndimage.imread(filepath, mode="L")
                images.append(image)

    images = np.array(images)
    array_shape = np.append(images.shape[0:3], 1)
    images = np.reshape(images, (array_shape))

    return images
Project: ML-Project | Author: Shiam-Chowdhury
def load_letter(folder, min_num_images):
  """Load the data for a single letter label."""

  image_files = os.listdir(folder)
  dataset = np.ndarray(shape=(len(image_files), image_size, image_size),
                         dtype=np.float32)
  print(folder)

  num_images = 0
  for image_index, image in enumerate(image_files):
    image_file = os.path.join(folder, image)
    try:
      image_data = (ndimage.imread(image_file).astype(float) -      # normalize data
                    pixel_depth / 2) / pixel_depth
      if image_data.shape != (image_size, image_size):
        raise Exception('Unexpected image shape: %s' % str(image_data.shape))
      dataset[num_images, :, :] = image_data
      num_images = num_images + 1
    except IOError as e:
      print('Could not read:', image_file, ':', e, '- it\'s ok, skipping.') # skip unreadable files

  dataset = dataset[0:num_images, :, :]
  if num_images < min_num_images:                                   # check if a given min. no. of images
    raise Exception('Many fewer images than expected: %d < %d' %    # has been loaded
                    (num_images, min_num_images))

  print('Full dataset tensor:', dataset.shape)
  print('Mean:', np.mean(dataset))
  print('Standard deviation:', np.std(dataset))
  return dataset


# function to store the normalized tensors obtained from the load_letter function in
# .pickle files for later use
Project: neon_segnet | Author: NervanaSystems
def main():
    assert os.path.isdir(args.image_path), '%s directory not found' % args.image_path

    for dataset in ['train', 'test', 'val']:
        out_dir_im = os.path.join(args.output_path, dataset)
        if not os.path.isdir(out_dir_im):
            os.makedirs(out_dir_im)

        out_dir_an = os.path.join(args.output_path, dataset + 'annot')
        if not os.path.isdir(out_dir_an):
            os.makedirs(out_dir_an)

        fid = open(os.path.join(args.output_path, '%s_images.csv' % dataset), 'w')
        # print header
        fid.write('image,labels\n')
        fns = glob(os.path.join(args.image_path, dataset, '*.png'))

        for fn in fns:

            fn_image = os.path.abspath(fn)
            fn_annot = os.path.split(fn_image)
            fn_annot = os.path.join(fn_annot[0] + 'annot', fn_annot[1])

            im = imread(fn_image)
            annot = imread(fn_annot)
            out_size = (256, 512)
            im = imresize(im, out_size)
            annot = imresize(annot, out_size, interp='nearest')


            fn_image_out = os.path.abspath(os.path.join(out_dir_im,
                                                        os.path.basename(fn_image)))
            fn_annot_out = os.path.abspath(os.path.join(out_dir_an,
                                                        os.path.basename(fn_image)))
            imsave(fn_image_out, im)
            imsave(fn_annot_out, annot)

            fid.write('%s,%s\n' %(fn_image_out, fn_annot_out))
        fid.close()
Project: Adversarial_Video_Generation | Author: dyelax
def get_test_frame_dims():
    img_path = glob(os.path.join(TEST_DIR, '*/*'))[0]
    img = imread(img_path, mode='RGB')
    shape = np.shape(img)

    return shape[0], shape[1]
Project: Adversarial_Video_Generation | Author: dyelax
def get_train_frame_dims():
    img_path = glob(os.path.join(TRAIN_DIR, '*/*'))[0]
    img = imread(img_path, mode='RGB')
    shape = np.shape(img)

    return shape[0], shape[1]
Project: Adversarial_Video_Generation | Author: dyelax
def get_full_clips(data_dir, num_clips, num_rec_out=1):
    """
    Loads a batch of random clips from the unprocessed train or test data.

    @param data_dir: The directory of the data to read. Should be either c.TRAIN_DIR or c.TEST_DIR.
    @param num_clips: The number of clips to read.
    @param num_rec_out: The number of outputs to predict. Outputs > 1 are computed recursively,
                        using the previously-generated frames as input. Default = 1.

    @return: An array of shape
             [num_clips, c.TRAIN_HEIGHT, c.TRAIN_WIDTH, (3 * (c.HIST_LEN + num_rec_out))].
             A batch of frame sequences with values normalized in range [-1, 1].
    """
    clips = np.empty([num_clips,
                      c.FULL_HEIGHT,
                      c.FULL_WIDTH,
                      (3 * (c.HIST_LEN + num_rec_out))])

    # get num_clips random episodes
    ep_dirs = np.random.choice(glob(os.path.join(data_dir, '*')), num_clips)

    # get a random clip of length HIST_LEN + num_rec_out from each episode
    for clip_num, ep_dir in enumerate(ep_dirs):
        ep_frame_paths = sorted(glob(os.path.join(ep_dir, '*')))
        start_index = np.random.choice(len(ep_frame_paths) - (c.HIST_LEN + num_rec_out - 1))
        clip_frame_paths = ep_frame_paths[start_index:start_index + (c.HIST_LEN + num_rec_out)]

        # read in frames
        for frame_num, frame_path in enumerate(clip_frame_paths):
            frame = imread(frame_path, mode='RGB')
            norm_frame = normalize_frames(frame)

            clips[clip_num, :, :, frame_num * 3:(frame_num + 1) * 3] = norm_frame

    return clips
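
A usage sketch, assuming the project's constants module c and the helpers shown above are importable.

# Hypothetical call: 4 random clips, each with c.HIST_LEN input frames plus 1 frame to predict.
clips = get_full_clips(c.TRAIN_DIR, num_clips=4, num_rec_out=1)
print(clips.shape)  # (4, c.FULL_HEIGHT, c.FULL_WIDTH, 3 * (c.HIST_LEN + 1))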
Project: ml-deepranking | Author: urakozz
def img(image_file):
    rgb = ndimage.imread(image_file).astype(float)
    rgb = (rgb - 255.0 / 2) / 255.0
    return rgb
Project: ml-deepranking | Author: urakozz
def img(image_file):
    rgb = ndimage.imread(image_file).astype(float)
    rgb = (rgb - 255.0/2) / 255.0
    return rgb
Project: depth-semantic-fully-conv | Author: iapatil
def __getitem__(self, index):
        img_name = self.listing[index]

        input_dir,target_depth_dir,target_label_dir = self.data_dir

        input_im, target_depth_im,target_label_im = imread(os.path.join(input_dir,img_name)),\
                                                    imread(os.path.join(target_depth_dir,img_name[:-3]+'png')),\
                                                    imread(os.path.join(target_label_dir,img_name[:-3]+'png'))


        if self.co_transform is not None:
            input_im, target_depth_im,target_label_im = self.co_transform(input_im,target_depth_im,target_label_im)

        if self.input_transform is not None:
            input_im = self.input_transform(input_im)

        if self.target_depth_transform is not None :
            target_depth_im = self.target_depth_transform(target_depth_im)

        if self.target_labels_transform is not None :
            target_label_im = self.target_labels_transform(target_label_im)

        input_rgb_im = input_im
        input_depth_im  = torch.cat((target_depth_im,target_depth_im,target_depth_im),dim = 0)
        target_im = target_label_im

        return input_rgb_im,input_depth_im,target_im
Project: deepmodels | Author: learningsociety
def load_cv_imgs(img_fns, img_sz=(256, 256), use_bgr=True):
  nb_channels = 3
  if not use_bgr:
    nb_channels = 1

  imgs = [
  ]  #np.ndarray((len(img_fns), img_sz[0], img_sz[1], nb_channels), np.float32)
  for i in range(len(img_fns)):
    try:
      im = cv2.imread(img_fns[i])
      if im is None:
        print 'cannot read image {}'.format(img_fns[i])
        continue
      if img_sz is not None:
        im = cv2.resize(im, img_sz)
      if use_bgr:
        imgs.append(im)
      else:
        # keep same dim
        curimg = np.ndarray((im.shape[0], im.shape[1], 1), np.uint8)
        curimg[:, :, 0] = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
        imgs.append(curimg)
    except cv2.error as e:
      print 'img error: {}, {}'.format(img_fns[i], e.message)

  #print 'loaded {} cv images'.format(len(imgs))
  if len(imgs) == 0:
    print img_fns
  return np.asarray(imgs)


# img_fns is a numpy array with strings
Project: deepmodels | Author: learningsociety
def load_scipy_imgs(img_fns, img_sz=(256, 256), use_bgr=True):
  nb_channels = 3
  if not use_bgr:
    nb_channels = 1

  imgs = [
  ]  #np.ndarray((len(img_fns), img_sz[0], img_sz[1], nb_channels), np.float32)
  for i in range(len(img_fns)):
    try:
      #im = cv2.imread(img_fns[i])
      import scipy.ndimage as sni
      im = sni.imread(img_fns[i])
      if im is None:
        continue
      if img_sz is not None:
        im = cv2.resize(im, img_sz)
      if use_bgr:
        imgs.append(im)
      else:
        # keep same dim
        curimg = np.ndarray((im.shape[0], im.shape[1], 1), np.uint8)
        curimg[:, :, 0] = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
        imgs.append(curimg)
    except cv2.error as e:
      print 'img error: {}, {}'.format(img_fns[i], e.message)
  #print 'loaded {} cv images'.format(len(imgs))
  return np.asarray(imgs)


# load images into a numpy array
Project: deepmodels | Author: learningsociety
def load_crop_imgs(img_fns, img_bboxes, img_sz, use_bgr=True):
  nb_channels = 3
  if not use_bgr:
    nb_channels = 1
  imgs = np.ndarray((len(img_fns), nb_channels, img_sz[0], img_sz[1]),
                    np.float32)
  print imgs.shape
  for i in range(len(img_fns)):
    im = cv2.imread(img_fns[i])
    imcrop = np.ndarray((img_sz[0], img_sz[1], nb_channels), np.float32)
    xs, ys, xe, ye = img_bboxes[i][0], img_bboxes[i][1], img_bboxes[i][
        0] + img_bboxes[i][2], img_bboxes[i][1] + img_bboxes[i][3]
    # Check if im is bgr or grayscale here?
    if use_bgr:
      imcrop = im[xs:xe, ys:ye, :]
    else:
      imcrop = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
      im = imcrop[xs:xe, ys:ye]
    im = cv2.resize(imcrop, img_sz)
    if use_bgr:
      imgs[i, :, :, :] = im
    else:
      imgs[i, 0, :, :] = im
  return imgs


# load images into a numpy array
Project: deepmodels | Author: learningsociety
def compute_mean_img(img_fns, img_sz):
  mean = None
  count = len(img_fns)
  for i in range(len(img_fns)):
    cv_img = cv2.imread(img_fns[i])
    if cv_img is None:
      raise ValueError(img_fns[i] + ' image read error')
    new_img = cv2.resize(cv_img, img_sz)
    if mean is None:
      mean = new_img.astype(np.float32)
    else:
      mean += new_img
  mean = mean / count
  return mean.astype(np.uint8)
Project: blcf | Author: willard-yuan
def get_mask_frame( self, frame, dim=None  ):
        '''
        return mask and name of the query frame. OUTDATED FUNCTION
        '''
        filename = frame.replace(".src.", ".mask.")

        # read image
        ima = cv2.imread(filename)

        # make sure is a mask
        if len(ima.shape)>2:
            ima = ima[:,:,0]

        # binarise
        ima[ima >0]=1.0

        # check dims
        if dim is not None:
            if ima.shape[0]>ima.shape[1]:
                dim_ = dim
            else:
                dim_ = (dim[1], dim[0])
        else:
            dim_ = ima.shape[:2]

        mask_r = reshape_maps_zoom( np.expand_dims(ima, axis=0 ) , dim_).squeeze()
        mask_r[mask_r >0]=1.0

        return mask_r
Project: blcf | Author: willard-yuan
def get_image_frame( queries, topic, id_frame ):
        return imread( self.get_src_path_fromID(topic, id_frame) )




###########################################################
# Additional functions
###########################################################
Project: notmnist | Author: aidiary
def load_letter(letter_dir, min_num_images):
    """Load the data for a single letter label."""
    image_files = os.listdir(letter_dir)
    # (num image, image width, image height)
    dataset = np.ndarray(shape=(len(image_files), image_size, image_size),
                         dtype=np.float32)
    image_index = 0
    print(letter_dir)
    for image in image_files:
        image_file = os.path.join(letter_dir, image)
        try:
            # normalize image to [-0.5, 0.5]
            image_data = (ndimage.imread(image_file).astype(float) -
                          pixel_depth / 2) / pixel_depth
            if image_data.shape != (image_size, image_size):
                raise Exception('Unexpected image shape: %s' % str(image_data.shape))
            dataset[image_index, :, :] = image_data
            image_index += 1
        except IOError as e:
            print('Could not read:', image_file, ':', e, "- it's ok, skipping.")

    num_images = image_index
    dataset = dataset[0:num_images, :, :]
    if num_images < min_num_images:
        raise Exception('Many fewer images than expected: %d < %d'
                        % (num_images, min_num_images))

    print('Full dataset tensor:', dataset.shape)
    print('Mean:', np.mean(dataset))
    print('Standard deviation:', np.std(dataset))
    return dataset
Project: notmnist | Author: aidiary
def draw_images(root_dir):
    """Draw sample images for each class"""
    assert len(root_dir) == num_classes  # A to J
    num_cols = 10
    pos = 1
    for i in range(num_classes):
        target_dir = root_dir[i]
        for j in range(num_cols):
            plt.subplot(num_classes, num_cols, pos)
            random_file = random.choice(os.listdir(target_dir))
            image = misc.imread(os.path.join(target_dir, random_file))
            plt.imshow(image, cmap=plt.get_cmap('gray'))
            plt.axis('off')
            pos += 1
    plt.show()
Project: tensorflow_image_tutorial | Author: ybenoit
def load_letter(folder, min_num_images, image_size, pixel_depth):
        """Load the data for a single letter label."""
        image_files = os.listdir(folder)
        dataset = np.ndarray(shape=(len(image_files), image_size, image_size),
                             dtype=np.float32)
        image_index = 0
        print(folder)
        for image in os.listdir(folder):
            image_file = os.path.join(folder, image)
            try:
                image_data = (ndimage.imread(image_file).astype(float) -
                              pixel_depth / 2) / pixel_depth
                if image_data.shape != (image_size, image_size):
                    raise Exception('Unexpected image shape: %s' % str(image_data.shape))
                dataset[image_index, :, :] = image_data
                image_index += 1
            except IOError as e:
                print('Could not read:', image_file, ':', e, '- it\'s ok, skipping.')

        num_images = image_index
        dataset = dataset[0:num_images, :, :]
        if num_images < min_num_images:
            raise Exception('Many fewer images than expected: %d < %d' %
                            (num_images, min_num_images))

        print('Full dataset tensor:', dataset.shape)
        print('Mean:', np.mean(dataset))
        print('Standard deviation:', np.std(dataset))
        return dataset
Project: ccvt | Author: inconvergent
def get_dens_from_img(fn):

  return 1.0-imread(fn)/255.