Python skimage.color module: rgb2gray() example source code

We extracted the following 50 code examples from open-source Python projects to illustrate how to use skimage.color.rgb2gray().
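Before the project samples, here is a minimal, self-contained sketch of the basic call (an illustration added for this page, not taken from any of the projects below): rgb2gray() takes an RGB array and returns a floating-point grayscale image in the range [0, 1].

import numpy as np
from skimage.color import rgb2gray

# A tiny synthetic RGB image with uint8 values in [0, 255]
rgb = np.random.randint(0, 256, size=(4, 4, 3), dtype=np.uint8)

gray = rgb2gray(rgb)                 # luminance-weighted conversion
print(gray.shape)                    # (4, 4): the color axis is dropped
print(gray.min() >= 0.0, gray.max() <= 1.0)   # True True: output is float in [0, 1]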

Project: iterative_inference_segm    Author: adri-romsor    | Project source | File source
def my_label2rgboverlay(labels, colors, image, alpha=0.2):
    """
    Generates image with segmentation labels on top

    Parameters
    ----------
    labels:  labels of one image (0, 1)
    colors:  colormap
    image:   image (0, 1, c), where c=3 (rgb)
    alpha: transparency
    """
    image_float = gray2rgb(img_as_float(rgb2gray(image) if
                                        image.shape[2] == 3 else
                                        np.squeeze(image)))
    label_image = my_label2rgb(labels, colors)
    output = image_float * alpha + label_image * (1 - alpha)
    return output
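The helper above depends on my_label2rgb, which is not shown; a hedged, self-contained sketch of the same alpha-blending idea, using skimage's built-in label2rgb in its place:

import numpy as np
from skimage.color import rgb2gray, gray2rgb, label2rgb
from skimage.util import img_as_float

def overlay_labels(labels, image, alpha=0.2):
    # Grayscale base replicated to 3 channels, as in the snippet above
    base = gray2rgb(img_as_float(rgb2gray(image)))
    colored = label2rgb(labels, bg_label=0)   # stand-in for my_label2rgb
    return base * alpha + colored * (1 - alpha)

image = np.random.rand(64, 64, 3)
labels = (np.arange(64 * 64).reshape(64, 64) // 1024) % 4
print(overlay_labels(labels, image).shape)    # (64, 64, 3)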
Project: colorNet-pytorch    Author: shufanwu    | Project source | File source
def __getitem__(self, index):
        path, target = self.imgs[index]
        img = self.loader(path)
        if self.transform is not None:
            img_original = self.transform(img)
            img_original = np.asarray(img_original)

            img_lab = rgb2lab(img_original)
            img_lab = (img_lab + 128) / 255
            img_ab = img_lab[:, :, 1:3]
            img_ab = torch.from_numpy(img_ab.transpose((2, 0, 1)))
            img_original = rgb2gray(img_original)
            img_original = torch.from_numpy(img_original)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return (img_original, img_ab), target
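A quick, hedged check of the value ranges this normalization relies on: rgb2lab() returns L in [0, 100] and a/b roughly within [-128, 127], so (img_lab + 128) / 255 brings the ab channels into roughly [0, 1] before they are used as regression targets.

import numpy as np
from skimage.color import rgb2lab, rgb2gray

rgb = np.random.rand(32, 32, 3)           # float RGB in [0, 1]
lab = rgb2lab(rgb)
ab = ((lab + 128) / 255)[:, :, 1:3]       # the same normalization as above
print(lab[..., 0].min() >= 0, lab[..., 0].max() <= 100)   # L channel stays in [0, 100]
print(ab.min() >= 0, ab.max() <= 1)                       # normalized ab lands in [0, 1]
print(rgb2gray(rgb).shape)                # (32, 32): the grayscale network input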
Project: colorNet-pytorch    Author: shufanwu    | Project source | File source
def __getitem__(self, index):
        path, target = self.imgs[index]
        img = self.loader(path)

        img_scale = img.copy()
        img_original = img
        img_scale = scale_transform(img_scale)

        img_scale = np.asarray(img_scale)
        img_original = np.asarray(img_original)

        img_scale = rgb2gray(img_scale)
        img_scale = torch.from_numpy(img_scale)
        img_original = rgb2gray(img_original)
        img_original = torch.from_numpy(img_original)
        return (img_original, img_scale), target
Project: chinese_generation    Author: polaroidz    | Project source | File source
def batch_generator(batch_size, nb_batches):
    batch_count = 0

    while True:
        pos = batch_count * batch_size
        batch = dataset[pos:pos+batch_size]

        X = np.zeros((batch_size, 1, img_size, img_size), dtype=np.float32)

        for k, path in enumerate(batch):
            im = io.imread(path)
            im = color.rgb2gray(im)

            X[k] = im[np.newaxis, ...]

        X = torch.from_numpy(X)
        X = Variable(X)

        yield X, batch

        batch_count += 1

        if batch_count > nb_batches:
            batch_count = 0
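The generator above relies on module-level globals (dataset, img_size) and wraps batches in torch.autograd.Variable, which is deprecated in current PyTorch; a hedged, self-contained sketch of the same batching pattern with synthetic frames standing in for io.imread():

import numpy as np
import torch
from skimage.color import rgb2gray

img_size = 64
frames = [np.random.rand(img_size, img_size, 3) for _ in range(8)]   # stand-in for file reads

X = np.zeros((len(frames), 1, img_size, img_size), dtype=np.float32)
for k, frame in enumerate(frames):
    X[k] = rgb2gray(frame)[np.newaxis, ...]   # add the channel axis, as above

X = torch.from_numpy(X)      # plain tensors replace Variable() in modern PyTorch
print(X.shape)               # torch.Size([8, 1, 64, 64])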
Project: tefla    Author: litan    | Project source | File source
def convert_new(fname, target_size):
    print('Processing image: %s' % fname)
    img = Image.open(fname)
    blurred = img.filter(ImageFilter.BLUR)
    ba = np.array(blurred)
    ba_gray = rgb2gray(ba)
    val = filters.threshold_otsu(ba_gray)
    # foreground = (ba_gray > val).astype(np.uint8)
    foreground = closing(ba_gray > val, square(3))
    # kernel = morphology.rectangle(5, 5)
    # foreground = morphology.binary_dilation(foreground, kernel)
    labels = measure.label(foreground)
    properties = measure.regionprops(labels)
    properties = sorted(properties, key=lambda p: p.area, reverse=True)
    # draw_top_regions(properties, 3)
    # return ba
    bbox = properties[0].bbox
    bbox = (bbox[1], bbox[0], bbox[3], bbox[2])
    cropped = img.crop(bbox)
    resized = cropped.resize([target_size, target_size])
    return np.array(resized)
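A condensed, hedged sketch of the crop-to-largest-region idea above with a synthetic image and no PIL; note that regionprops bbox is (min_row, min_col, max_row, max_col), which is why the snippet reorders it before Image.crop():

import numpy as np
from skimage.color import rgb2gray
from skimage import filters, measure
from skimage.morphology import closing, square

# Synthetic RGB image: dark background with one bright rectangle
img = np.zeros((100, 100, 3))
img[30:70, 20:60] = 0.9

gray = rgb2gray(img)
val = filters.threshold_otsu(gray)
foreground = closing(gray > val, square(3))
labels = measure.label(foreground)
props = sorted(measure.regionprops(labels), key=lambda p: p.area, reverse=True)
minr, minc, maxr, maxc = props[0].bbox
cropped = img[minr:maxr, minc:maxc]
print(cropped.shape)          # (40, 40, 3): the bright rectangle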
Project: tefla    Author: litan    | Project source | File source
def convert_new_regions(fname, target_size):
    print('Processing image: %s' % fname)
    img = Image.open(fname)
    blurred = img.filter(ImageFilter.BLUR)
    ba = np.array(blurred)
    ba_gray = rgb2gray(ba)
    val = filters.threshold_otsu(ba_gray)
    # foreground = (ba_gray > val).astype(np.uint8)
    foreground = closing(ba_gray > val, square(3))
    # kernel = morphology.rectangle(5, 5)
    # foreground = morphology.binary_dilation(foreground, kernel)
    labels = measure.label(foreground)
    properties = measure.regionprops(labels)
    properties = sorted(properties, key=lambda p: p.area, reverse=True)
    draw_top_regions(properties, 3)
    return ba
Project: tefla    Author: litan    | Project source | File source
def convert(fname, target_size):
    # print('Processing image: %s' % fname)
    img = Image.open(fname)
    blurred = img.filter(ImageFilter.BLUR)
    ba = np.array(blurred)
    ba_gray = rgb2gray(ba)
    val = filters.threshold_otsu(ba_gray)
    # foreground = (ba_gray > val).astype(np.uint8)
    foreground = closing(ba_gray > val, square(3))
    # kernel = morphology.rectangle(5, 5)
    # foreground = morphology.binary_dilation(foreground, kernel)
    labels = measure.label(foreground)
    properties = measure.regionprops(labels)
    properties = sorted(properties, key=lambda p: p.area, reverse=True)
    # draw_top_regions(properties, 3)
    # return ba
    bbox = properties[0].bbox
    bbox = (bbox[1], bbox[0], bbox[3], bbox[2])
    cropped = img.crop(bbox)
    resized = cropped.resize([target_size, target_size])
    return resized
Project: toothless    Author: ratt-ru    | Project source | File source
def fits2jpg(fname):
    hdu_list = fits.open(fname)
    image = hdu_list[0].data
    image = np.squeeze(image)
    img = np.copy(image)
    idx = np.isnan(img)
    img[idx] = 0
    img_clip = np.flipud(img)
    sigma = 3.0
    # Estimate stats
    mean, median, std = sigma_clipped_stats(img_clip, sigma=sigma, iters=10)
    # Clip off n sigma points
    img_clip = clip(img_clip,std*sigma)
    if img_clip.shape[0] !=150 or img_clip.shape[1] !=150:
        img_clip = resize(img_clip, (150,150))
    #img_clip = rgb2gray(img_clip)

    outfile = fname[0:-5] +'.png'
    imsave(outfile, img_clip)
    return img_clip,outfile




# Do the fusion classification
Project: harpreif    Author: harpribot    | Project source | File source
def load_next_image(self):
        """
        Loads next image from train index for training.
        :return: True if the next image is present, else False
        """
        if len(self.image_list) == self.image_ptr:
            return False
        sys.stderr.write('Loaded Image #' + str(self.image_ptr) + ' ...\n')
        self.image = ndimage.imread(self.image_list[self.image_ptr])
        is_color = self.__check_color()
        if is_color:
            self.image = rgb2gray(self.image)

        assert self.image.shape == (256, 256), 'Image not 256 x 256'
        self.__break_into_jigzaw_pieces()
        self.image_ptr += 1
        self.tries = 1

        return True
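Note that scipy.ndimage.imread used above was removed in SciPy 1.2 (imageio.imread is the usual replacement when reading from disk); a hedged sketch of the grayscale branch with a synthetic frame standing in for the file read:

import numpy as np
from skimage.color import rgb2gray

# Stand-in for ndimage.imread(...): a 256x256 RGB frame
image = np.random.randint(0, 256, size=(256, 256, 3), dtype=np.uint8)

if image.ndim == 3:                 # roughly what __check_color() decides above
    image = rgb2gray(image)
assert image.shape == (256, 256), 'Image not 256 x 256'
print(image.shape)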
Project: harpreif    Author: harpribot    | Project source | File source
def load_next_image(self):
        """
        Loads next image from train index for training.
        :return: True if the next image is present, else False
        """
        if len(self.image_list) == self.image_ptr:
            return False
        print 'Loaded New Image'
        self.image = ndimage.imread(self.image_list[self.image_ptr])
        self.image_name = self.image_list[self.image_ptr]

        is_color = self.__check_color()
        if is_color:
            self.image = rgb2gray(self.image)

        assert self.image.shape == (256, 256), 'Image not 256 x 256'
        self.image_ptr += 1

        return True
Project: ml-traffic    Author: Zepheus    | Project source | File source
def postprocess(imgs, size, grayscale=False):
    print("Postprocessing images and resize (at %d)" % size)
    keyname = ('gray_%d' if grayscale else 'color_%d') % size
    for img in imgs:

        # Continue if already calculated
        if img.isSetByName(keyname):
            continue

        floatimg = img_as_float(img.image)
        floatimg = resize(floatimg, (size, size))
        if grayscale:
            floatimg = rgb2gray(floatimg)
        img.setByName(keyname, floatimg)  # expect to return floats


# Augment images
Project: Reinforcement_Learning    Author: jcwleo    | Project source | File source
def pre_proc(X):
    '''Preprocess a raw frame.

    Args:
        X(np.array): raw RGB frame; it is converted to grayscale, resized to
            84x84 and scaled by 255 so it can be stored as uint8
            (keeping the replay memory small)

    Returns:
        np.array: preprocessed frame
    '''
    # Taking the element-wise max with the previous frame would suppress flickering
    # x = np.maximum(X, X1)
    # Convert to grayscale, resize, and store the result as uint8 to save memory
    x = np.uint8(resize(rgb2gray(X), (HEIGHT, WIDTH), mode='reflect') * 255)

    return x
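A self-contained check of this preprocessing with a synthetic 210x160 Atari-style frame, assuming HEIGHT = WIDTH = 84 as in typical DQN setups:

import numpy as np
from skimage.color import rgb2gray
from skimage.transform import resize

HEIGHT, WIDTH = 84, 84
frame = np.random.randint(0, 256, size=(210, 160, 3), dtype=np.uint8)

x = np.uint8(resize(rgb2gray(frame), (HEIGHT, WIDTH), mode='reflect') * 255)
print(x.shape, x.dtype)                  # (84, 84) uint8
print(x.min() >= 0 and x.max() <= 255)   # True: stored as bytes to keep the replay memory small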
Project: Reinforcement_Learning    Author: jcwleo    | Project source | File source
def pre_proc(X):
    '''Preprocess a raw frame.

    Args:
        X(np.array): raw RGB frame; it is converted to grayscale, resized to
            84x84 and scaled by 255 so it can be stored as uint8
            (keeping the replay memory small)

    Returns:
        np.array: preprocessed frame
    '''
    # Taking the element-wise max with the previous frame would suppress flickering
    # x = np.maximum(X, X1)
    # Convert to grayscale, resize, and store the result as uint8 to save memory
    x = np.uint8(resize(rgb2gray(X), (HEIGHT, WIDTH), mode='reflect') * 255)

    return x
Project: qtim_ROP    Author: QTIM-Lab    | Project source | File source
def create_mask(im_arr, erode=0):

    if im_arr.shape[2] == 3:
        im_arr = rgb2gray(im_arr)

    thresh = 0.05
    inv_bin = np.invert(im_arr > thresh)
    all_labels = measure.label(inv_bin)

    # Select largest object and invert
    seg_arr = all_labels == 0

    if erode > 0:
        strel = selem.disk(erode, dtype=np.bool)
        seg_arr = binary_erosion(seg_arr, selem=strel)
    elif erode < 0:
        strel = selem.disk(abs(erode), dtype=np.bool)
        seg_arr = binary_dilation(seg_arr, selem=strel)

    return seg_arr.astype(np.bool)
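Two hedged notes on the snippet above: selem.disk refers to skimage.morphology.disk (recent scikit-image versions take footprint= instead of selem=), and np.bool has been removed from recent NumPy, where plain bool is used instead. A self-contained sketch of the thresholding-and-labelling core on a synthetic image:

import numpy as np
from skimage.color import rgb2gray
from skimage import measure

# Synthetic fundus-like image: black border, brighter disc in the centre
yy, xx = np.mgrid[:128, :128]
img = np.zeros((128, 128, 3))
img[(yy - 64) ** 2 + (xx - 64) ** 2 < 50 ** 2] = 0.6

im_arr = rgb2gray(img) if img.shape[2] == 3 else img
inv_bin = np.invert(im_arr > 0.05)        # True on the dark background
all_labels = measure.label(inv_bin)
seg_arr = all_labels == 0                 # label 0 = pixels that were False, i.e. the bright region
print(seg_arr.shape, seg_arr.dtype, int(seg_arr.sum()) > 0)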
Project: mcv-m5    Author: david-vazquez    | Project source | File source
def load_img(path, grayscale=False, resize=None, order=1):
    # Load image
    img = io.imread(path)

    # Resize
    # print('Desired resize: ' + str(resize))
    if resize is not None:
        img = skimage.transform.resize(img, resize, order=order,
                                       preserve_range=True)
        # print('Final resize: ' + str(img.shape))

    # Color conversion
    if len(img.shape)==2 and not grayscale:
        img = gray2rgb(img)
    elif len(img.shape)>2 and img.shape[2]==3 and grayscale:
        img = rgb2gray(img)

    # Return image
    return img
Project: nn-segmentation-for-lar    Author: cvdlab    | Project source | File source
def predict_image(self, test_img):
        """
        predicts classes of input image
        :param test_img: filepath to image to predict on
        :param show: displays segmentation results
        :return: segmented result
        """
        img = np.array( rgb2gray( imread( test_img ).astype( 'float' ) ).reshape( 5, 216, 160 )[-2] ) / 256

        plist = []

        # create patches from an entire slice
        img_1 = adjust_sigmoid( img ).astype( float )
        edges_1 = adjust_sigmoid( img, inv=True ).astype( float )
        edges_2 = img_1
        edges_5_n = normalize( laplace( img_1 ) )
        edges_5_n = img_as_float( img_as_ubyte( edges_5_n ) )

        plist.append( extract_patches_2d( edges_1, (23, 23) ) )
        plist.append( extract_patches_2d( edges_2, (23, 23) ) )
        plist.append( extract_patches_2d( edges_5_n, (23, 23) ) )
        patches = np.array( zip( np.array( plist[0] ), np.array( plist[1] ), np.array( plist[2] ) ) )

        # predict classes of each pixel based on model
        full_pred = self.model.predict_classes( patches )
        fp1 = full_pred.reshape( 194, 138 )
        return fp1
Project: nn-segmentation-for-lar    Author: cvdlab    | Project source | File source
def predict_image(self, test_img):
        """
        predicts classes of input image
        :param test_img: filepath to image to predict on
        :return: segmented result
        """
        # imgs = io.imread(test_img).astype('float').reshape(5, 216, 160)
        imgs = mpimg.imread(test_img).astype('float')
        imgs = rgb2gray(imgs).reshape(5, 216, 160)

        plist = []

        # create patches_to_predict from an entire slice
        for img in imgs[:-1]:
            if np.max(img) != 0:
                img /= np.max(img)
            p = extract_patches_2d(img, (33, 33))
            plist.append(p)
        patches_to_predict = np.array(
            zip(np.array(plist[0]), np.array(plist[1]), np.array(plist[2]), np.array(plist[3])))

        # predict classes of each pixel based on model
        full_pred = self.model.predict_classes(patches_to_predict)
        fp1 = full_pred.reshape(184, 128)
        return fp1
Project: artorithmia    Author: alichtner    | Project source | File source
def extract_blur(self, plot=False):
        """
        Calculate the variance of the 2nd derivative of the image to get blur.

        Input:  plot (bool) whether or not to show the image after Laplacian
        Output: None"""
        # do on grayscale
        # check what the mean would give instead of variance
        self.bluriness = filters.laplace(color.rgb2gray(self.image)).var()
        if plot is True:
            sns.set_style("whitegrid", {'axes.grid': False})
            self.lap = filters.laplace(color.rgb2gray(self.image))
            plt.imshow(self.lap)
            plt.title('Laplacian of {}'.format(self.short_name))
            plt.show()
            plt.imshow(self.lap)
            plt.show()
Project: artorithmia    Author: alichtner    | Project source | File source
def extract_symmetry(self):
        """
        Calculate the symmetry of the image by subtracting left from right.

        Input:  None
        Output: None
        """
        # currently this is only for horizontal symmetry
        if len(self.image.shape) == 3:
            height, width, _ = self.image.shape
        else:
            height, width = self.image.shape
        if width % 2 != 0:
            width -= 1
            pixels = height * width
            left = self.image[:, :width/2]
            right = self.image[:, width/2:-1]
        else:
            pixels = height * width
            left = self.image[:, :width/2]
            right = self.image[:, width/2:]
        left_gray = color.rgb2gray(left)
        right_gray = color.rgb2gray(right)
        self.symmetry = np.abs(left_gray -
                               np.fliplr(right_gray)).sum()/(pixels/1.*2)
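The width/2 slice indices above assume Python 2 integer division; a standalone sketch of the same horizontal-symmetry score using floor division so it also runs on Python 3:

import numpy as np
from skimage import color

def symmetry_score(image):
    """Mean absolute difference between the left half and the mirrored right half."""
    height, width = image.shape[:2]
    width -= width % 2                    # drop an odd trailing column
    half = width // 2
    left_gray = color.rgb2gray(image[:, :half])
    right_gray = color.rgb2gray(image[:, half:width])
    return np.abs(left_gray - np.fliplr(right_gray)).mean()

print(symmetry_score(np.random.rand(64, 65, 3)))   # noisy image: non-zero score
print(symmetry_score(np.ones((64, 64, 3))))        # perfectly symmetric: 0.0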
Project: keras_zoo    Author: david-vazquez    | Project source | File source
def load_img(path, grayscale=False, resize=None, order=1):
    # Load image
    img = io.imread(path)

    # Resize
    # print('Desired resize: ' + str(resize))
    if resize is not None:
        img = skimage.transform.resize(img, resize, order=order,
                                       preserve_range=True)
        # print('Final resize: ' + str(img.shape))

    # Color conversion
    if len(img.shape) == 2 and not grayscale:
        img = gray2rgb(img)
    elif len(img.shape) > 2 and img.shape[2] == 3 and grayscale:
        img = rgb2gray(img)

    # Return image
    return img
Project: CS231A_Project    Author: afazel    | Project source | File source
def extract_pos_hog_features(path, num_samples):

    features = []
    cnt = 0
    for dirpath, dirnames, filenames in walk(path):
        for my_file in filenames:
            print path+my_file
            if cnt < num_samples:
                cnt = cnt + 1
                im = cv2.imread(path + my_file)
                print im.shape
                image = color.rgb2gray(im)
                image = image[17:145, 16:80]

                my_feature, _ = hog(image, orientations=9, pixels_per_cell=(8, 8),cells_per_block=(2, 2), visualise=True)
                features.append(my_feature)
    return features
Project: CS231A_Project    Author: afazel    | Project source | File source
def extract_neg_hog_features(path, num_samples):

    features = []
    cnt = 0
    for dirpath, dirnames, filenames in walk(path):
        for my_file in filenames:
            if cnt < num_samples:
                cnt = cnt + 1
                im = cv2.imread(path + my_file)
                image = color.rgb2gray(im)
                image = image[17:145, 16:80]
                #cv2.imshow('test',image)
                #cv2.waitKey(0)
                my_feature, _ = hog(image, orientations=9, pixels_per_cell=(8, 8),cells_per_block=(2, 2), visualise=True)
                features.append(my_feature)
    return features
Project: pytorch-nec    Author: mjacar    | Project source | File source
def get_screen(self):
    screen = self.env.render(mode='rgb_array')
    screen = color.rgb2gray(screen)
    screen = imresize(screen, (110, 84))
    screen = screen[18:102][:] / 255.0
    return screen.astype(np.float)
Project: iFruitFly    Author: AdnanMuhib    | Project source | File source
def scaler(_imageFile):
    _scaled = color.rgb2gray(_imageFile);
    return _scaled;
Project: iFruitFly    Author: AdnanMuhib    | Project source | File source
def scaler(_imageFile):
    _scaled = color.rgb2gray(_imageFile);     
    return _scaled;
Project: facade-segmentation    Author: jfemiani    | Project source | File source
def _load_image_mask(self):
        # Sometimes an approximate mask can be produced based on Google range data
        # the mask indicates which parts of the image are not facade
        mask_path = os.path.join(os.path.dirname(self.path), 'mask.png')
        if self.use_mask and os.path.isfile(mask_path):
            self.data_mask = rgb2gray(imread(mask_path)) > 0.5
        else:
            self.data_mask = None
Project: tflearn    Author: tflearn    | Project source | File source
def get_preprocessed_frame(self, observation):
        """
        0) Atari frames: 210 x 160
        1) Get image grayscale
        2) Rescale image 110 x 84
        3) Crop center 84 x 84 (you can crop top/bottom according to the game)
        """
        return resize(rgb2gray(observation), (110, 84))[13:110 - 13, :]
Project: Deep-Learning-with-Theano    Author: PacktPublishing    | Project source | File source
def save_observation(observation):
    global observations
    observations = np.roll(observations, -input_depth, axis=0)
    observations[-input_depth:, ...] = rgb2gray(imresize(observation, screen))[None, ...]
Project: DeepLearning    Author: Wanwannodao    | Project source | File source
def _preprocess_observation(self, obs):
        # crop center
        return np.asarray(resize(rgb2gray(obs), (110, 84))[-84:, :]*255, dtype=np.uint8)

# Hyperparameters
# env_name = 'CartPole-v0'  # env to play
Project: DeepLearning    Author: Wanwannodao    | Project source | File source
def _preprocess_observation(self, obs):
        # crop center
        return np.asarray(resize(rgb2gray(obs), (110, 84))[-84:, :]*255, dtype=np.uint8)

# Hyperparameters
# env_name = 'CartPole-v0'  # env to play
Project: Imagyn    Author: zevisert    | Project source | File source
def seam_carve(img):
    """
    Seam carve image
    :param img: PIL image object
    :return: PIL image object
    """
    # Convert to skimage image
    img_to_convert = img.copy()
    img_to_convert = pil_to_skimage(img_to_convert)

    # Energy Map, used to determine which pixels will be removed
    eimg = filters.sobel(color.rgb2gray(img_to_convert))

    # (height, width)
    img_dimensions = img_to_convert.shape

    # Squish width if width >= height, squish height if height > width
    # Number of pixels to keep along the outer edges (5% of largest dimension)
    # Number of seams to be removed, (1 to 10% of largest dimension)
    if img_dimensions[1] >= img_dimensions[0]:
        mode = "horizontal"
        border = round(img_dimensions[1] * 0.05)
        num_seams = random.randint(1, round(0.1*img_dimensions[1]))

    else:
        mode = "vertical" 
        border = round(img_dimensions[0] * 0.05)
        num_seams = random.randint(1, round(0.1*img_dimensions[0]))

    try:
        img_to_convert = transform.seam_carve(img_to_convert, eimg, mode, num_seams, border)

    except Exception as e:
        print("Unable to seam_carve: " + str(e))

    # Convert back to PIL image
    img_to_convert = skimage_to_pil(img_to_convert)

    return img_to_convert
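Note that transform.seam_carve has been removed from recent scikit-image releases, so the call above only runs on older versions; the Sobel energy map itself is still available. A minimal sketch of that step:

import numpy as np
from skimage import color, filters

img = np.random.rand(80, 120, 3)
eimg = filters.sobel(color.rgb2gray(img))    # per-pixel gradient magnitude used as the energy map
print(eimg.shape, float(eimg.min()) >= 0.0)  # (80, 120) True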
Project: e2c-pytorch    Author: ethanluoyc    | Project source | File source
def __init__(self, root, split):
        if split not in ['train', 'test', 'all']:
            raise ValueError

        dir = os.path.join(root, split)
        filenames = glob.glob(os.path.join(dir, '*.png'))

        if split == 'all':
            filenames = glob.glob(os.path.join(root, 'train/*.png'))
            filenames.extend(glob.glob(os.path.join(root, 'test/*.png')))

        filenames = sorted(
            filenames, key=lambda x: int(os.path.basename(x).split('.')[0]))

        images = []

        for f in filenames:
            img = plt.imread(f)
            img[img != 1] = 0
            images.append(resize(rgb2gray(img), [48, 48], mode='constant'))

        self.images = np.array(images, dtype=np.float32)
        self.images = self.images.reshape([len(images), 48, 48, 1])

        action_filename = os.path.join(root, 'actions.txt')

        with open(action_filename) as infile:
            actions = np.array([float(l) for l in infile.readlines()])

        self.actions = actions[:len(self.images)].astype(np.float32)
        self.actions = self.actions.reshape(len(actions), 1)
Project: e2c-pytorch    Author: ethanluoyc    | Project source | File source
def all_states(cls):
        _env = gym.make('Pendulum-v0').env
        width = GymPendulumDataset.width
        height = GymPendulumDataset.height
        X = np.zeros((360, width, height))

        for i in range(360):
            th = i / 360. * 2 * np.pi
            state = _env.render_state(th)
            X[i, :, :] = resize(rgb2gray(state), (width, height), mode='reflect')
        _env.close()
        _env.viewer.close()
        return X
Project: nuts-ml    Author: maet3608    | Project source | File source
def load_image(filepath, as_grey=False, dtype='uint8', no_alpha=True):
    """
    Load image as numpy array from given filepath.

    Supported formats: gif, png, jpg, bmp, tif, npy

    >>> img = load_image('tests/data/img_formats/nut_color.jpg')
    >>> shapestr(img)
    '213x320x3'

    :param string filepath: Filepath to image file or numpy array.
    :param bool as_grey:
    :return: numpy array with shapes
             (h, w) for grayscale or monochrome,
             (h, w, 3) for RGB (3 color channels in last axis)
             (h, w, 4) for RGBA (for no_alpha = False)
             (h, w, 3) for RGBA (for no_alpha = True)
             pixel values are in range [0,255] for dtype = uint8
    :rtype: numpy ndarray
    """
    if filepath.endswith('.npy'):  # image as numpy array
        arr = np.load(filepath).astype(dtype)
        arr = rgb2gray(arr) if as_grey else arr
    else:
        # img_num=0 due to 
        # https://github.com/scikit-image/scikit-image/issues/2406
        arr = ski.imread(filepath, as_grey=as_grey, img_num=0).astype(dtype)
    if arr.ndim == 3 and arr.shape[2] == 4 and no_alpha:
        arr = arr[..., :3]  # cut off alpha channel
    return arr
Project: nuts-ml    Author: maet3608    | Project source | File source
def rgb2gray(image):
    """
    RGB scale image to grayscale image

    >>> image = np.eye(3, dtype='uint8') * 255
    >>> rgb2gray(image)
    array([[255,   0,   0],
           [  0, 255,   0],
           [  0,   0, 255]], dtype=uint8)

    :param numpy array image: Numpy array with range [0,255] and dtype 'uint8'. 
    :return: grayscale image
    :rtype:  numpy array with range [0,255] and dtype 'uint8'
    """
    return floatimg2uint8(skc.rgb2gray(image))
Project: DQN    Author: pekaalto    | Project source | File source
def process_image(img):
        return 2 * color.rgb2gray(transform.rescale(img[34:194], 0.5)) - 1
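A hedged check of the scaling above with a synthetic 210x160 frame; on current scikit-image, rescale needs channel_axis=-1 (older releases used multichannel=True) to leave the color axis alone, after which 2*x - 1 maps the grayscale frame into [-1, 1]:

import numpy as np
from skimage import color, transform

img = np.random.randint(0, 256, size=(210, 160, 3), dtype=np.uint8)
small = transform.rescale(img[34:194], 0.5, channel_axis=-1)  # (80, 80, 3), floats in [0, 1]
x = 2 * color.rgb2gray(small) - 1
print(x.shape)                                # (80, 80)
print(x.min() >= -1.0 and x.max() <= 1.0)     # True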
Project: DQN    Author: pekaalto    | Project source | File source
def process_image(obs):
        return 2 * color.rgb2gray(obs) - 1.0
Project: dqn    Author: elix-tech    | Project source | File source
def get_initial_state(self, observation, last_observation):
        processed_observation = np.maximum(observation, last_observation)
        processed_observation = np.uint8(resize(rgb2gray(processed_observation), (FRAME_WIDTH, FRAME_HEIGHT)) * 255)
        state = [processed_observation for _ in xrange(STATE_LENGTH)]
        return np.stack(state, axis=0)
Project: dqn    Author: elix-tech    | Project source | File source
def preprocess(observation, last_observation):
    processed_observation = np.maximum(observation, last_observation)
    processed_observation = np.uint8(resize(rgb2gray(processed_observation), (FRAME_WIDTH, FRAME_HEIGHT)) * 255)
    return np.reshape(processed_observation, (1, FRAME_WIDTH, FRAME_HEIGHT))
Project: oslodatascience-rl    Author: Froskekongen    | Project source | File source
def preprocessImage(self, img):
        '''Compute luminance (grayscale in range [0, 1]) and resize to (D, D).'''
        img = rgb2gray(img) # compute luminance 210x160
        img = resize(img, (self.agent.D, self.agent.D), mode='constant') # resize image
        return img
Project: oslodatascience-rl    Author: Froskekongen    | Project source | File source
def preprocessImage(self, img):
        '''Compute luminance (grayscale in range [0, 1]) and resize to (D, D).'''
        img = rgb2gray(img) # compute luminance 210x160
        img = resize(img, (self.D, self.D), mode='constant') # resize image
        return img
Project: oslodatascience-rl    Author: Froskekongen    | Project source | File source
def _preprocessImage(self, img):
        '''Compute luminance (grayscale in range [0, 1]) and resize to (D, D).'''
        img = rgb2gray(img) # compute luminance 210x160
        img = resize(img, (self.agent.D, self.agent.D), mode='constant') # resize image
        return img
Project: mrflow    Author: jswulff    | Project source | File source
def plot_figure_video_rigidity_example(image, rigidity):
    # Figure 93
    PTH='./figure_rigidity_example/'
    if not os.path.isdir(PTH):
        os.makedirs(PTH)

    I_bw = color.rgb2gray(image)
    I_bw = np.dstack((I_bw,I_bw,I_bw))*0.5

    I_bw[:,:,0][rigidity==1] += 0.5
    I_bw[:,:,2][rigidity==0] += 0.5

    io.imsave(PTH+'image.png', image)
    io.imsave(PTH+'rigidity.png', I_bw)
Project: tensorflow-rl    Author: steveKapturowski    | Project source | File source
def get_preprocessed_frame(self, observation):
        if isinstance(self.env.observation_space, Discrete):
            expanded_obs = np.zeros(self.env.observation_space.n, dtype=np.float32)
            expanded_obs[observation] = 1
            return expanded_obs
        elif len(observation.shape) > 1:
            if not self.use_rgb:
                observation = rgb2gray(observation)
            return resize(observation, (self.resized_width, self.resized_height))
        else:
            return observation
Project: harpreif    Author: harpribot    | Project source | File source
def get_puzzle_pieces(self):
        """
        returns the puzzle pieces, as well as their true locations in row major numbering format, as a dictionary,
        where the key, is row_major puzzle_piece_id and the value is the piece image itself
        :return: The dictionary of piece_id => piece_image
        """
        result = dict()
        for piece_id, piece in enumerate(self.tiles):
            piece_image = np.array(piece.image)
            result[piece_id] = rgb2gray(piece_image)

        return result
Project: Sign-Language-Recognition    Author: achyudhk    | Project source | File source
def hog_gen_windows(work_tuple):
    image_arr, coords = work_tuple
    lx1,ly1,rx1,ry1 = coords
    if image_arr.ndim > 2:
        image_arr = resize(color.rgb2gray(image_arr)[ly1:ry1, lx1:rx1], (120, 120))
    hog_image_rescaled = generate_hog_features(image_arr)
    return hog_image_rescaled
Project: Sign-Language-Recognition    Author: achyudhk    | Project source | File source
def hog_gen(image, path=0):
    if path != 0 and image == 0:
        image = imread(path)
    if image.ndim > 2:
        image = color.rgb2gray(image)
    hog_image_rescaled = generate_hog_features(image)
    return hog_image_rescaled
Project: Sign-Language-Recognition    Author: achyudhk    | Project source | File source
def generate_test_set(color_img):
    img_arr = color.rgb2gray(color_img)
    img_transformed = resize(img_arr, output_shape=(120, 120))
    hog_image = generate_hog_features(img_transformed)
    return hog_image
Project: Sign-Language-Recognition    Author: achyudhk    | Project source | File source
def generate_hog_features(filename):
    input_image = io.imread(filename)
    gray_image = color.rgb2gray(input_image)
    # 87% for orientations=8, pixels_per_cell=(4, 4), cells_per_block=(1, 1)
    fd, hog_image = hog(gray_image, orientations=8, pixels_per_cell=(4, 4),
                        cells_per_block=(1, 1), visualise=True)
    hog_image_rescaled = exposure.rescale_intensity(hog_image, in_range=(0, 0.02))
    return hog_image_rescaled
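A hedged usage sketch of the HOG step on a synthetic image; note that newer scikit-image spells the keyword visualize= rather than visualise=:

import numpy as np
from skimage.color import rgb2gray
from skimage.feature import hog
from skimage import exposure

gray = rgb2gray(np.random.rand(120, 120, 3))
fd, hog_image = hog(gray, orientations=8, pixels_per_cell=(4, 4),
                    cells_per_block=(1, 1), visualize=True)
hog_rescaled = exposure.rescale_intensity(hog_image, in_range=(0, 0.02))
print(fd.shape, hog_image.shape)   # (7200,) feature vector and a (120, 120) visualization image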
Project: Sign-Language-Recognition    Author: achyudhk    | Project source | File source
def save_hog_image_comparison(filename):
    input_image = io.imread(filename)
    gray_image = color.rgb2gray(input_image)
    out_filename = "hog/" + filename

    # 87% for orientations=8, pixels_per_cell=(4, 4), cells_per_block=(1, 1)
    fd, hog_image = hog(gray_image, orientations=8, pixels_per_cell=(4, 4),
                        cells_per_block=(1, 1), visualise=True)
    # io.imsave("hog/" + filename, hog_image)
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4), sharex=True, sharey=True)

    ax1.axis('off')
    ax1.imshow(gray_image, cmap=plt.cm.gray)
    ax1.set_title('Input image')
    ax1.set_adjustable('box-forced')

    # Rescale histogram for better display
    hog_image_rescaled = exposure.rescale_intensity(hog_image, in_range=(0, 0.02))
    ax2.axis('off')
    ax2.imshow(hog_image_rescaled, cmap=plt.cm.gray)
    ax2.set_title('Histogram of Oriented Gradients')
    ax1.set_adjustable('box-forced')
    plt.savefig(out_filename)
    plt.close()

    return hog_image