Python skimage.io module: imread() code examples

Below are 50 code examples, extracted from open-source Python projects, that illustrate how to use skimage.io.imread().
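
Before the project examples, here is a minimal, self-contained sketch of the basic imread()/imsave() round trip that the snippets below build on. The file names are placeholders, and the as_gray keyword assumes a reasonably recent scikit-image release (older versions spelled it as_grey):

import numpy as np
from skimage import io

# Read an image into a numpy array: typically uint8, with shape (H, W) for
# greyscale or (H, W, 3) for RGB.
img = io.imread('example.png')
print(img.shape, img.dtype)

# Load directly as greyscale (usually float values in [0, 1]).
gray = io.imread('example.png', as_gray=True)

# Crop with ordinary numpy slicing and write the result back to disk.
h, w = img.shape[:2]
io.imsave('example_cropped.png', img[:h // 2, :w // 2])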

Project: stegasawus    Author: rokkuran    | Project source | File source
def crop_images(path_images, path_output, dimensions, centre=True):
    """
    Batch crop images from top left hand corner to dimensions specified. Skips
    images where dimensions are incompatible.
    """
    print('cropping images...')
    for i, filename in enumerate(os.listdir(path_images)):
        try:
            image = io.imread('{}{}'.format(path_images, filename))
            cropped = crop_image(image, dimensions, centre=centre)
            io.imsave(
                fname='{}{}'.format(path_output, filename),
                arr=cropped
            )
            print('{}: {}'.format(i, filename))
        except IndexError:
            print('{}: {} failed - dimensions incompatible'.format(i, filename))

    print('all images cropped and saved.')
Project: chinese_generation    Author: polaroidz    | Project source | File source
def batch_generator(batch_size, nb_batches):
    batch_count = 0

    while True:
        pos = batch_count * batch_size
        batch = dataset[pos:pos+batch_size]

        X = np.zeros((batch_size, 1, img_size, img_size), dtype=np.float32)

        for k, path in enumerate(batch):
            im = io.imread(path)
            im = color.rgb2gray(im)

            X[k] = im[np.newaxis, ...]

        X = torch.from_numpy(X)
        X = Variable(X)

        yield X, batch

        batch_count += 1

        if batch_count > nb_batches:
            batch_count = 0
Project: kaggle-right-whale    Author: felixlaumon    | Project source | File source
def get_cropped_test_img(fname, bbox_pred, pad=None, as_grey=False, return_bbox=False):
    img = imread(fname, as_grey=as_grey)
    h = img.shape[0]
    w = img.shape[1]
    bbox_pred = bbox_pred * [w, h, w, h]
    bbox_pred = np.round(bbox_pred).astype(int)
    l = min(max(bbox_pred[0], 0), w)
    t = min(max(bbox_pred[1], 0), h)
    r = min(max(l + bbox_pred[2], 0), w)
    b = min(max(t + bbox_pred[3], 0), h)

    if pad is not None:
        l, t, r, b = add_padding_to_bbox(
            l, t, (r - l), (b - t), pad / 100.0,
            img.shape[1], img.shape[0],
            format='ltrb'
        )
    cropped_img = img[t:b, l:r]

    if return_bbox:
        return cropped_img, bbox_pred
    else:
        return cropped_img
Project: facade-segmentation    Author: jfemiani    | Project source | File source
def load_image(self, path):
        self.data = imread(path)
        self.path = path
        self._load_image_mask()
        self._mask_out_common_obstructions()
        self._rectify_image()

        self.driving_layers = driving.process_strip(channels_first(self.rectified * 255))
        self.facade_layers = i12.process_strip(channels_first(self.rectified * 255))

        self._create_sky_mask()
        self._segment_windows()
        self._segment_facade_edges()

        facade_cuts = self._split_at_facade_edges()
        facade_mask = self._create_facade_mask()
        wall_colors = self._mask_out_wall_colors(facade_mask)
        self.wall_colors = wall_colors

        self.facade_candidates = self._find_facade_candidates(wall_colors, facade_cuts)
Project: HSISeg    Author: HSISeg    | Project source | File source
def get_data_from_image(image_path):
#   from osgeo import gdal
    from skimage import io
    if image_path.split(".")[-1] == "tif":  # check the final extension segment
        M = io.imread(image_path)
#       dataset = gdal.Open(image_path,gdal.GA_ReadOnly)
#       col = dataset.RasterXSize
#       row = dataset.RasterYSize
#       a = [[[]for y in xrange(col)] for z in xrange(row)]
#       for i in xrange(1,dataset.RasterCount + 1):
#           band = dataset.GetRasterBand(i).ReadAsArray()
#           for m in xrange(0,row):
#               for n in xrange(0,col):
#                   a[m][n].append(band[m][n])
#       M = np.array(a,dtype='uint16')
    else:
        M = np.asarray(Image.open(image_path))
    return M
Project: checkmymeat    Author: kendricktan    | Project source | File source
def predict(url):
    global model      
    # Read image
    image = io.imread(url)
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    image = cv2.resize(image, (500, 500), interpolation=cv2.INTER_CUBIC)    

    # Use otsu to mask
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    ret, mask = cv2.threshold(gray,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
    mask = cv2.medianBlur(mask, 5)

    features = describe(image, mask)

    state = le.inverse_transform(model.predict([features]))[0]
    return {'type': state}
Project: train-CRF-RNN    Author: martinkersner    | Project source | File source
def convert2lmdb(path_src, src_imgs, ext, path_dst, class_ids, preprocess_mode, im_sz, data_mode):
  if os.path.isdir(path_dst):
    print('DB ' + path_dst + ' already exists.\n'
          'Skip creating ' + path_dst + '.', file=sys.stderr)
    return None

  if data_mode == 'label':
    lut = create_lut(class_ids)

  db = lmdb.open(path_dst, map_size=int(1e12))

  with db.begin(write=True) as in_txn:
    for idx, img_name in enumerate(src_imgs):
      #img = imread(os.path.join(path_src + img_name)+ext)
      img = np.array(Image.open(os.path.join(path_src + img_name)+ext))
      img = img.astype(np.uint8)

      if data_mode == 'label':
        img = preprocess_label(img, lut, preprocess_mode, im_sz)
      elif data_mode == 'image':
        img = preprocess_image(img, preprocess_mode, im_sz)

      img_dat = caffe.io.array_to_datum(img)
      in_txn.put('{:0>10d}'.format(idx), img_dat.SerializeToString())
Project: pythonml    Author: nicholastoddsmith    | Project source | File source
def LoadData(FP = '.'):
    '''
    Loads the OCR dataset. A is matrix of images (NIMG, Height, Width, Channel).
    Y is matrix of characters (NIMG, MAX_CHAR)
    FP:     Path to OCR data folder
    return: Data Matrix, Target Matrix, Target Strings, Filenames
    '''
    TFP = os.path.join(FP, 'Train.csv')
    A, Y, T, FN = [], [], [], []
    with open(TFP) as F:
        for Li in F:
            FNi, Yi = Li.strip().split(',')                     #filename,string
            T.append(Yi)
            A.append(imread(os.path.join(FP, 'Out', FNi)))
            Y.append(list(Yi) + [' '] * (MAX_CHAR - len(Yi)))   #Pad strings with spaces
            FN.append(FNi)
    return np.stack(A), np.stack(Y), np.stack(T), np.stack(FN)
Project: Imagyn    Author: zevisert    | Project source | File source
def pil_to_skimage(img):
    """
    Convert PIL image to a Skimage image
    :param img: PIL image object
    :return: Skimage image object
    """
    # Get the absolute path of the working directory
    abspath = os.path.dirname(__file__)

    # Create a temp file to store the image
    temp = tempfile.NamedTemporaryFile(suffix=".jpg", delete=False, dir=abspath)

    # Save the image into the temp file
    img.save(temp.name, 'JPEG')

    # Read the image as a SciKit image object
    ski_img = io.imread(temp.name, plugin='pil')

    # Close the file
    temp.close()

    # Delete the file
    os.remove(temp.name)

    return ski_img
Project: adascan_public    Author: amlankar    | Project source | File source
def flowList(xFileNames, yFileNames):
    '''
    (x/y)fileNames: List of the fileNames in order to get the flows from
    '''

    frameList = []

    if (len(xFileNames) != len(yFileNames)):
        print('XFILE!=YFILE ERROR: In', xFileNames[0])

    for i in range(0, min(len(xFileNames), len(yFileNames))):
        imgX = io.imread(xFileNames[i])
        imgY = io.imread(yFileNames[i])
        frameList.append(np.dstack((imgX, imgY)))

    frameList = np.array(frameList)
    return frameList
Project: tefla    Author: openAGI    | Project source | File source
def plot_masks(cropped_image_path, prediction_map, output_image_path):
    fig = plt.figure("segments")
    ax = fig.add_subplot(1, 1, 1)
    image_draw = io.imread(cropped_image_path)
    segparams = SegParams()
    feature_mapping = segparams.feature_palette()
    classes = segparams.feature_classes()
    legend_patches = []
    for i in feature_mapping.keys():
        if i in prediction_map:
            temp_inds = np.where(prediction_map != i)
            temp_map = prediction_map.copy()
            temp_map[temp_inds] = 0
            image_draw = mark_boundaries(
                image_draw, temp_map, mode='inner', color=feature_mapping[i])  # outline_color=feature_mapping[i])
            legend_patches.append(mpatches.Patch(
                color=(feature_mapping[i][0], feature_mapping[i][1], feature_mapping[i][2], 1), label=classes[i]))
    ax.imshow(image_draw)
    lgd = ax.legend(handles=legend_patches,
                    loc="upper left", bbox_to_anchor=(1, 1))
    plt.axis("off")
    # os.path.splitext drops the extension safely (str.strip('.jpg') trims characters, not the suffix)
    plt.savefig(os.path.splitext(output_image_path)[0] + '_segmented.png',
                bbox_extra_artists=(lgd,), bbox_inches='tight')
    plt.show()
Project: hintbot    Author: madebyollin    | Project source | File source
def loadImages(datadir, maxDirectoryCount=10, split=0.9):
    inputImages, targetImages = [], []  # collect image slices across all directories
    for dirPath, dirNames, fileNames in os.walk(datadir):
        fileNames = [f for f in fileNames if not f[0] == '.']
        dirNames[:] = [d for d in dirNames if not d[0] == '.']
        if (maxDirectoryCount != 0):
            fullSizeFileNames = [fileName for fileName in fileNames if fileName.endswith("@2x.png") and (fileName.replace("@2x","") in fileNames)]
            for fullSizeFileName in fullSizeFileNames:
                inputImage = io.imread(dirPath + "/" + fullSizeFileName)
                targetImage = io.imread(dirPath + "/" + fullSizeFileName.replace("@2x",""))
                # print(dirPath + "/" + fullSizeFileName)
                inputSlices, targetSlices = sliceImages(inputImage, targetImage)
                # print("got", len(inputSlices), "input splices and",len(targetSlices),"targetSlices")
                inputImages.extend(inputSlices)
                targetImages.extend(targetSlices)
            maxDirectoryCount -= 1
    x, y = np.asarray(inputImages), np.asarray(targetImages)
    x_train = x[:int(len(x) * split)]
    y_train = y[:int(len(y) * split)]
    x_test = x[int(len(x) * split):]
    y_test = y[int(len(y) * split):]
    # Shuffle training data so that repeats aren't in the same batch
    # x_train, y_train = shuffle(x_train, y_train, random_state=0)
    return (x_train, y_train, x_test, y_test)
Project: TF-Examples    Author: CharlesShang    | Project source | File source
def load_img(self, img_filename):
        """
        Summary:
            Load image from the filename. Default is to load in color if
            possible.

        Args:
            img_filename (string): string of the image name, relative to
                the image directory.

        Returns:
            np array of float32: an image as a numpy array of float32
        """
        if not img_filename.endswith('.jpg'):
            img_filename = os.path.join(self.img_dir, img_filename + '.jpg')
        else:
            img_filename = os.path.join(self.img_dir, img_filename)
        img = skimage.img_as_float(io.imread(
            img_filename)).astype(np.float32)
        if img.ndim == 2:
            img = img[:, :, np.newaxis]
        elif img.shape[2] == 4:
            img = img[:, :, :3]
        return img
Project: CancerImageAnalyzer2    Author: byeungchun    | Project source | File source
def findRemSmObjValue(_biImageFile="E:/workspace/jinyoung/CancerImageAnalyzer/img/ppimg1601101508/thimg1601101511/BF_position020100_time0001hVal0.4thVal0.9.png"):
    _remSmObjOutputPath = '/remSmObjImg'+datetime.datetime.today().strftime('%y%m%d%H%M')+'/'
    remSmObjImageFileName = os.path.basename(_biImageFile)
    biImageFilePath = os.path.dirname(_biImageFile)

    reSmObjImageFilePath = biImageFilePath + _remSmObjOutputPath

    biImg = imread(_biImageFile)
    biImgRsize = int(biImg.shape[0] * 0.1)
    biImgCsize = int(biImg.shape[1] * 0.1)
    biImg = biImg[biImgRsize:-biImgRsize, biImgCsize:-biImgCsize]
    biImg = ndimage.binary_fill_holes(biImg)
    for smObjVal in np.arange(0,100000,10000):
        filledImg = cia.removeSmallObject(biImg, minSize=smObjVal)
        if not os.path.exists(reSmObjImageFilePath):
            os.mkdir(reSmObjImageFilePath)
        biImageFileName = remSmObjImageFileName[:remSmObjImageFileName.rfind('.')]+'smObjVal'+str(smObjVal)+'.png'
        imsave(reSmObjImageFilePath+biImageFileName, filledImg)



#findHvalue()
Project: cnn_polyp_detection    Author: odysszis    | Project source | File source
def load_image(self, idx):
        """
        Load input image and preprocess for Caffe:
        - resize image
        - cast to float
        - switch channels RGB -> BGR
        - subtract mean
        - transpose to channel x height x width order
        """
        idx = idx.split()[0]
        try:
            im = Image.open('{}/{}'.format(self.data_dir, idx))
        except:
            from skimage import io
            im = io.imread('{}/{}'.format(self.data_dir, idx))
            im = Image.fromarray(im)
        im = im.resize((self.width, self.height), Image.ANTIALIAS)  # resize image
        im = np.array(im, dtype=np.float32)                         # cast to float
        im = im[:, :, ::-1]                                         # RGB -> BGR
        im -= self.mean                                             # mean subtraction
        im = im.transpose((2, 0, 1))                                # channel x height x width
        return im
Project: cnn_polyp_detection    Author: odysszis    | Project source | File source
def load_image(self, idx):
        """
        Load input image and preprocess:
        - resize
        - cast to float
        - switch channels RGB -> BGR
        - subtract mean
        - transpose to channel x height x width order
        """
        idx = self.indices[idx]
        idx = idx.split()[0]
        im = io.imread('{}/{}'.format(self.data_dir, idx))
        im = Image.fromarray(im)
        im = im.resize((self.width, self.height), Image.ANTIALIAS)  # resize image
        im = np.array(im, dtype=np.float32)                         # cast to float
        im = im[:, :, ::-1]                                         # RGB -> BGR
        im -= self.mean_bgr                                         # mean subtraction
        im = im.transpose((2, 0, 1))                                # channel x height x width
        return im
Project: cnn_polyp_detection    Author: odysszis    | Project source | File source
def load_label(self, idx):
        """
        Load binary mask and preprocess:
        - resize
        - convert to greyscale
        - cast to integer
        - binarize
        """
        idx = self.indices_label[idx]
        idx = idx.split()[0]
        im = io.imread('{}/{}'.format(self.data_dir, idx))
        im = Image.fromarray(im)
        im = im.resize((self.width, self.height), Image.NEAREST)  # resize
        im = im.convert('L')                                      # convert to greyscale
        im = np.array(im, dtype=np.int32)                         # cast to integer
        label = im
        label[label > 0] = 1                                      # binarize
        label = np.array(label, np.uint8)
        label = label[np.newaxis, ...]
        return label
Project: cnn_polyp_detection    Author: odysszis    | Project source | File source
def load_depth(self, idx):
        """
        Load depth map and preprocess:
        - resize
        - cast to float
        - subtract mean
        """
        idx = self.indices_depth[idx]
        idx = idx.split()[0]
        im = io.imread('{}/depth/{}'.format(self.data_dir, idx))
        im = Image.fromarray(im)
        im = im.resize((self.width, self.height), Image.ANTIALIAS)  # resize
        im = np.array(im, dtype=np.float32)                         # cast to float
        d = im
        d -= self.mean_depth                                        # mean subtraction
        d = d[np.newaxis, ...]
        return d
Project: poeai    Author: nicholastoddsmith    | Project source | File source
def SplitSave(self, p = 'TSD/Train/Images', wp = 'TSD/Train/Split'):
        '''
        #p:     #Dir contains images to split
        #wp:    #Dir to write split images  
        '''
        c = 0
        if not os.path.exists(wp):
            os.mkdir(wp)
        pdl = np.random.choice([fni for fni in os.listdir(p) if fni.startswith('di')], 32, replace = False)
        for i, fn in enumerate(pdl):
            print('{:4d}/{:4d}:\t{:s}'.format(i + 1, len(pdl), fn))
            #A = imread(os.path.join(p, fn))[0:-14, 1:-1]
            #A = self.GetScreen()
            #S = self.ts.DivideIntoSubimages(A).astype(np.uint8)
            A = imread(os.path.join(p, fn))[0:-12, 4:-4, :]
            S = self.ts.DivideIntoSubimages(A).astype(np.uint8)
            for Si in S:
                imsave(os.path.join(wp, '{:03d}.png'.format(c)), Si)
                c += 1
Project: Sign-Language-Recognition    Author: achyudhk    | Project source | File source
def load_images(self, test_list):

        """
            test_list : list of users to use for testing
            eg ["user_1", "user_2", "user_3"]
        """

        self.image_list = []

        for user in test_list:

            csv = "%s%s/%s_loc.csv" % (self.data_directory, user, user)

            with open(csv) as fh:
                data = [line.strip().split(',') for line in fh]

            for line in data[1:]:

                img_path, x1,y1,x2,y2, = line
                pos = tuple(map(int,(x1,y1,x2,y2)))
                letter = img_path[-6]

                img = io.imread("%s%s" % (self.data_directory, img_path))

                self.image_list.append((img, pos, letter))
Project: Food-Classification    Author: Tkd-Alex    | Project source | File source
def display_img_and_representation(x, y, pathimage, y_etichetta):


    print(y[y_etichetta])

    img = sio.imread(pathimage)

    plt.figure(figsize=(12,4))

    plt.subplot(1,2,1)
    plt.imshow(img)

    plt.subplot(1,2,2)
    plt.plot(x)

    plt.show()
Project: Food-Classification    Author: Tkd-Alex    | Project source | File source
def describe_dataset(dataset,kmeans):
    y = list() 
    X = list() 
    paths = list() 

    classes=dataset.getClasses()

    ni = 0
    t1 = time()
    for cl in classes:
        for path in dataset.paths[cl]: 
            img = sio.imread(path,as_grey = True)
            feat = extract_and_describe(img,kmeans)
            X.append(feat)
            y.append(classes.index(cl)) 
            paths.append(path) 
            ni+= 1

    X = np.array(X)
    y = np.array(y)
    t2 = time()
    print "Elapsed time {0:0.2f}".format(t2-t1)
    return X,y,paths
Project: FCN_MSCOCO_Food_Segmentation    Author: gakarak    | Project source | File source
def readDataMasked(pidx):
    with open(pidx, 'r') as f:
        wdir = os.path.dirname(pidx)
        lstpath = f.read().splitlines()
        lstpath = [os.path.join(wdir,xx) for xx in lstpath]
        numPath = len(lstpath)
        dataX = None
        dataY = None
        for ii,pp in enumerate(lstpath):
            img4 = skio.imread(pp)
            img = img4[:,:,:3].astype(np.float)
            img -= img.mean()
            img /= img.std()
            msk = (img4[:,:,3]>0).astype(np.float)
            msk = np_utils.to_categorical(msk.reshape(-1), 2)
            # msk = msk.reshape(-1)
            if dataX is None:
                dataX = np.zeros([numPath] + list(img.shape))
                dataY = np.zeros([numPath] + list(msk.shape))
            dataX[ii] = img
            dataY[ii] = msk
            if (ii%100)==0:
                print ('[%d/%d]' % (ii, numPath))
        return (dataX, dataY)
Project: FCN_MSCOCO_Food_Segmentation    Author: gakarak    | Project source | File source
def getBatchDataByIdx(self, parBatchIdx):
        rndIdx = parBatchIdx
        parBatchSize = len(rndIdx)
        dataX = np.zeros([parBatchSize] + list(self.shapeImg), dtype=np.float)
        dataY = np.zeros([parBatchSize] + list(self.shapeMsk), dtype=np.float)
        for ii, tidx in enumerate(rndIdx):
            if self.isDataInMemory:
                dataX[ii] = self.dataImg[tidx]
                dataY[ii] = self.dataMskCls[tidx]
            else:
                tpathImg = self.arrPathDataImg[tidx]
                tpathMsk = self.arrPathDataMsk[tidx]
                tdataImg = self.adjustImage(skio.imread(tpathImg))
                tdataMsk = skio.imread(tpathMsk)
                tdataImg = self.transformImageFromOriginal(tdataImg)
                tdataMsk = self.transformImageFromOriginal(tdataMsk)
                tdataMskCls = self.convertMskToOneHot(tdataMsk)
                dataX[ii] = tdataImg
                dataY[ii] = tdataMskCls
        if self.isTheanoShape:
            tshp = dataY.shape
            dataY = dataY.reshape([tshp[0], tshp[1], np.prod(tshp[-2:])]).transpose((0, 2, 1))
            # print (tshp)
        return (dataX, dataY)
Project: color-extractor    Author: algolia    | Project source | File source
def get(self, uri):
        i = imread(uri)
        if len(i.shape) == 2:
            i = gray2rgb(i)
        else:
            i = i[:, :, :3]
        c = self._image_to_color.get(i)

        dbg = self._settings['debug']
        if dbg is None:
            return c

        c, imgs = c
        b = splitext(basename(uri))[0]
        imsave(join(dbg, b + '-resized.jpg'), imgs['resized'])
        imsave(join(dbg, b + '-back.jpg'), img_as_float(imgs['back']))
        imsave(join(dbg, b + '-skin.jpg'), img_as_float(imgs['skin']))
        imsave(join(dbg, b + '-clusters.jpg'), imgs['clusters'])

        return c, {
            'resized': join(dbg, b + '-resized.jpg'),
            'back': join(dbg, b + '-back.jpg'),
            'skin': join(dbg, b + '-skin.jpg'),
            'clusters': join(dbg, b + '-clusters.jpg'),
        }
Project: view-finding-network    Author: yiling-chen    | Project source | File source
def evaluate_sliding_window(img_filename, crops):
    img = io.imread(img_filename).astype(np.float32)/255
    if img.ndim == 2: # Handle B/W images
        img = np.expand_dims(img, axis=-1)
        img = np.repeat(img, 3, 2)

    img_crops = np.zeros((batch_size, 227, 227, 3))
    for i in range(len(crops)):
        crop = crops[i]
        img_crop = transform.resize(img[crop[1]:crop[1]+crop[3],crop[0]:crop[0]+crop[2]], (227, 227))-0.5
        img_crop = np.expand_dims(img_crop, axis=0)
        img_crops[i,:,:,:] = img_crop

    # compute ranking scores
    scores = sess.run([score_func], feed_dict={image_placeholder: img_crops})

    # find the optimal crop
    idx = np.argmax(scores[:len(crops)])
    best_window = crops[idx]

    # return the best crop
    return (best_window[0], best_window[1], best_window[2], best_window[3])
Project: ascii    Author: Tarnasa    | Project source | File source
def draw_blur_levels():
    import matplotlib.pyplot as plt
    from skimage import io

    image = io.imread('out/66.png')  # 36 for $, 79 for O

    fig, axes = plt.subplots(nrows=2, ncols=3,
            subplot_kw={'adjustable': 'box-forced'})
    ax = axes.ravel()

    for blur_level in range(6):
        blurred = uniform_filter(image, 3.0*blur_level, mode='reflect', cval=0)

        ax[blur_level].imshow(blurred, cmap='gray', interpolation='nearest')
        ax[blur_level].set_title(str(blur_level), fontsize=20)
    plt.show()
Project: dcn.tf    Author: beopst    | Project source | File source
def load_data(src,shuffle=True):
    """ Load data from directories.
    """

    imgs = [img for img in glob.glob(os.path.join(src,'*.png'))]

    x = np.zeros((len(imgs),100,100), dtype=np.float32)
    y = np.zeros(len(imgs), dtype=np.int64)

    for idx, img in enumerate(imgs):
        im = io.imread(img,1)
        im = img_as_float(im) # rescale from [0,255] to [0,1]

        label = int(img.split('/')[-1].split('.')[0].split('_')[-1])

        x[idx] = im
        y[idx] = label

    x = np.expand_dims(x,3)
    data = list(zip(x, y))  # materialize as a list so it can be shuffled in place

    if shuffle: random.shuffle(data)

    return data
Project: neural-art-mini    Author: pavelgonchar    | Project source | File source
def PreprocessContentImage(path, long_edge):
    img = io.imread(path)
    logging.info("load the content image, size = %s", img.shape[:2])
    factor = float(long_edge) / max(img.shape[:2])
    new_size = (int(img.shape[0] * factor), int(img.shape[1] * factor))
    resized_img = transform.resize(img, new_size)
    sample = np.asarray(resized_img) * 256
    # swap axes to make image from (224, 224, 3) to (3, 224, 224)
    sample = np.swapaxes(sample, 0, 2)
    sample = np.swapaxes(sample, 1, 2)
    # sub mean
    sample[0, :] -= 123.68
    sample[1, :] -= 116.779
    sample[2, :] -= 103.939
    logging.info("resize the content image to %s", new_size)
    return np.resize(sample, (1, 3, sample.shape[1], sample.shape[2]))
Project: kaggle-yelp-restaurant-photo-classification    Author: u1234x1234    | Project source | File source
def PreprocessImage(path, show_img=True):
    # load image
    img = io.imread(path)
    print("Original Image Shape: ", img.shape)
    # we crop image from center
    short_egde = min(img.shape[:2])
    yy = int((img.shape[0] - short_egde) / 2)
    xx = int((img.shape[1] - short_egde) / 2)
    crop_img = img[yy : yy + short_egde, xx : xx + short_egde]
    # resize to 299, 299
    resized_img = transform.resize(crop_img, (299, 299))
    if show_img:
        io.imshow(resized_img)
    # convert to numpy.ndarray
    sample = np.asarray(resized_img) * 256
    # swap axes to make image from (299, 299, 3) to (3, 299, 299)
    sample = np.swapaxes(sample, 0, 2)
    sample = np.swapaxes(sample, 1, 2)
    # sub mean
    normed_img = sample - 128.
    normed_img /= 128.

    return np.reshape(normed_img, (1, 3, 299, 299))
Project: kaggle-yelp-restaurant-photo-classification    Author: u1234x1234    | Project source | File source
def PreprocessImage(path, show_img=True):
    # load image
    img = io.imread(path)
#    print("Original Image Shape: ", img.shape)
    # we crop image from center
    short_egde = min(img.shape[:2])
    yy = int((img.shape[0] - short_egde) / 2)
    xx = int((img.shape[1] - short_egde) / 2)
    crop_img = img[yy : yy + short_egde, xx : xx + short_egde]
    # resize to 299, 299
    resized_img = transform.resize(crop_img, (299, 299))
    if show_img:
        io.imshow(resized_img)
    # convert to numpy.ndarray
    sample = np.asarray(resized_img) * 256
    # swap axes to make image from (299, 299, 3) to (3, 299, 299)
    sample = np.swapaxes(sample, 0, 2)
    sample = np.swapaxes(sample, 1, 2)
    # sub mean
    normed_img = sample - 128.
    normed_img /= 128.

    return np.reshape(normed_img, (1, 3, 299, 299))
Project: news-shot-classification    Author: gshruti95    | Project source | File source
def cropframes(clip_dir, image_files, clip_path):

    clip = clip_path.split('/')[-1]
    clip_name = clip.split('.')[0]

    crop_dir = clip_dir + 'cropped/'
    # crop_dir = '/home/sxg755/dataset/train/all_frames/cropped/'
    if not os.path.exists(crop_dir):
        os.makedirs(crop_dir)

    cropped_files = []
    for idx, image in enumerate(image_files):   
        img = io.imread(image)
        h = img.shape[0]
        w = img.shape[1]
        img_cropped = img[0:4*h//5, 0:w]  # keep the top four-fifths of the frame
        io.imsave(crop_dir + clip_name + '_keyframe' +  "{0:0>4}".format(idx+1) + '.jpg', img_cropped)
        cropped_files.append(crop_dir + clip_name + '_keyframe' +  "{0:0>4}".format(idx+1) + '.jpg')

    return cropped_files
Project: sign-detection-and-localization    Author: rajat503    | Project source | File source
def load_images(self, test_list):

        """
            test_list : list of users to use for testing
            eg ["user_1", "user_2", "user_3"]
        """

        self.image_list = []

        for user in test_list:

            csv = "%s%s/%s_loc.csv" % (self.data_directory, user, user)

            with open(csv) as fh:
                data = [line.strip().split(',') for line in fh]

            for line in data[1:]:

                img_path, x1,y1,x2,y2, = line
                pos = tuple(map(int,(x1,y1,x2,y2)))
                letter = img_path[-6]

                img = io.imread("%s%s" % (self.data_directory, img_path))

                self.image_list.append((img, pos, letter))
Project: sign-detection-and-localization    Author: rajat503    | Project source | File source
def train(user_list, path):
    train_data = []
    train_boxes =[]

    for user in user_list:
        with open(path+user+'/'+user+'_loc.csv', 'rb') as csvfile:
            x=csv.reader(csvfile)
            for row in x:
                if row[0]=='image':
                    continue
                image = io.imread(path+row[0])
                data_vector = image
                # data_vector = np.array(image.flatten()).tolist()
                # sys.exit(0)
                ground_truth = [int(row[1]), int(row[2]), int(row[3]), int(row[4])]

                user_id = int(user.split('_')[1])
                train_data.append(data_vector)
                train_boxes.append(ground_truth)


    localization.train(train_data, train_boxes)
# train(['user_3','user_4','user_5','user_6','user_7','user_9','user_10','user_11','user_12','user_13','user_14','user_15','user_16','user_17','user_18','user_19'])
# train(['user_3','user_4', 'user_5','user_6','user_7','user_9','user_10', 'user_11', 'user_12', 'user_13', 'user_14' ,'user_15', 'user_16', 'user_17', 'user_18','user_19'])
Project: Representation-Learning-by-Learning-to-Count    Author: gitlimlab    | Project source | File source
def __init__(self, ids, name='default',
                 max_examples=None, is_train=True):
        self._ids = list(ids)
        self.name = name
        self.is_train = is_train

        if max_examples is not None:
            self._ids = self._ids[:max_examples]

        file = os.path.join(__IMAGENET_IMG_PATH__, self._ids[0])

        try:
            imread(file)
        except:
            raise IOError('Dataset not found. Please make sure the dataset was downloaded.')
        log.info("Reading Done: %s", file)
Project: mxnet_tk1    Author: starimpact    | Project source | File source
def PreprocessImage(path, show_img=False):
    # load image
    img = io.imread(path)
    print("Original Image Shape: ", img.shape)
    # we crop image from center
    short_egde = min(img.shape[:2])
    yy = int((img.shape[0] - short_egde) / 2)
    xx = int((img.shape[1] - short_egde) / 2)
    crop_img = img[yy : yy + short_egde, xx : xx + short_egde]
    # resize to 224, 224
    resized_img = transform.resize(crop_img, (224, 224))
    if show_img:
        io.imshow(resized_img)
    # convert to numpy.ndarray
    sample = np.asarray(resized_img) * 255
    # swap axes to make image from (224, 224, 3) to (3, 224, 224)
    sample = np.swapaxes(sample, 0, 2)
    sample = np.swapaxes(sample, 1, 2)

    # sub mean
    normed_img = sample - mean_img
    normed_img.resize(1, 3, 224, 224)
    return normed_img

# Get preprocessed batch (single image batch)
Project: mxnet_tk1    Author: starimpact    | Project source | File source
def PreprocessContentImage(path, long_edge):
    img = io.imread(path)
    logging.info("load the content image, size = %s", img.shape[:2])
    factor = float(long_edge) / max(img.shape[:2])
    new_size = (int(img.shape[0] * factor), int(img.shape[1] * factor))
    resized_img = transform.resize(img, new_size)
    sample = np.asarray(resized_img) * 256
    # swap axes to make image from (224, 224, 3) to (3, 224, 224)
    sample = np.swapaxes(sample, 0, 2)
    sample = np.swapaxes(sample, 1, 2)
    # sub mean
    sample[0, :] -= 123.68
    sample[1, :] -= 116.779
    sample[2, :] -= 103.939
    logging.info("resize the content image to %s", new_size)
    return np.resize(sample, (1, 3, sample.shape[1], sample.shape[2]))
Project: han    Author: croath    | Project source | File source
def get_real_images(paths):
    real_images = []
    for path in paths:
        # Calculate a threshold to do image binarization, all colors at every pixel will be translated to number 0(white) or 1(black)
        camera = io.imread(path)
        val = filters.threshold_otsu(camera)
        result = (camera < val)*1.0
        real_images.append(result)
    np_images = numpy.array(real_images)
    np_images = np_images.reshape(np_images.shape[0], np_images.shape[1] * np_images.shape[2])
    return np_images
Project: monogreedy    Author: jinjunqi    | Project source | File source
def read_coco_image(data_split, coco_id, root_dir=osp.join(DATA_ROOT, 'mscoco')):
    file_name = 'COCO_{}2014_'.format(data_split) + str(coco_id).zfill(12) + '.jpg'
    im = imread(osp.join(root_dir, data_split+'2014', file_name))
    return im