The following 50 code examples, extracted from open-source Python projects, illustrate how to use PIL.Image.Image().
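Before the examples, here is a minimal sketch (not taken from any of the projects quoted below, and assuming Pillow is installed and importable as PIL) of the two usages that recur throughout: creating an in-memory image with Image.new() and type-checking objects against the PIL.Image.Image base class.

from PIL import Image

# Build a small RGBA image in memory; Image.new() returns an instance of
# the PIL.Image.Image base class, which is what most of the examples below
# accept or produce.
img = Image.new("RGBA", (64, 64), color=(255, 0, 0, 255))

# A very common pattern in the examples: type-check against Image.Image.
assert isinstance(img, Image.Image)
print(img.mode, img.size)  # -> RGBA (64, 64)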
def __init__(self, image, samplefac=10, colors=256):

    # Check Numpy
    if np is None:
        raise RuntimeError("Need Numpy for the NeuQuant algorithm.")

    # Check image
    if image.size[0] * image.size[1] < NeuQuant.MAXPRIME:
        raise IOError("Image is too small")
    if image.mode != "RGBA":
        raise IOError("Image mode should be RGBA.")

    # Initialize
    self.setconstants(samplefac, colors)
    self.pixels = np.fromstring(image.tostring(), np.uint32)
    self.setUpArrays()

    self.learn()
    self.fix()
    self.inxbuild()
def quantize_without_scipy(self, image):
    """ This function can be used if no scipy is available.
    It's 7 times slower though.
    """
    w, h = image.size
    px = np.asarray(image).copy()
    memo = {}
    for j in range(w):
        for i in range(h):
            key = (px[i, j, 0], px[i, j, 1], px[i, j, 2])
            try:
                val = memo[key]
            except KeyError:
                val = self.convert(*key)
                memo[key] = val
            px[i, j, 0], px[i, j, 1], px[i, j, 2] = val
    return Image.fromarray(px).convert("RGB").quantize(palette=self.paletteImage())
def get_mask(self, fill=1, outline=0):
    """ Get a mask that is nonzero within the object.

    :param fill: A color to use on the object's interior
    :param outline: A color to use on the object's edge (single pixel)
    :return: A 2D numpy array of uint8
    """
    if self.type in (TYPE_POLYGON, TYPE_BOUNDING_BOX):
        (top, left, bottom, right) = self.polygon.bounds()
        w = right - left
        h = bottom - top
        mask = Image.new("I", (w, h))
        d = ImageDraw.Draw(mask)
        d.polygon([(px - left, py - top) for (px, py) in self.polygon.points],
                  fill=fill, outline=outline)
        del d
        return np.asarray(mask)
    else:
        assert False, "Unhandled Type"
def normalize2RGB(self, *args, _image=None, _step=None):
    if _image is None:
        step = {'op': self.normalize2RGB}
        self.steps.append(step)
        return self
    else:
        if _image.mode == "RGB":
            pass
        elif _image.mode == "L":
            _image = Image.merge("RGB", (_image, _image, _image))
        elif _image.mode == "CMYK":
            _image = _image.convert('RGB')
        elif _image.mode == "RGBA":
            _image = _image.convert('RGB')
        else:
            raise NotImplementedError('Unknown image format. {}'.format(_image.mode))
        _image.load()
        return _image
def _take_screenshot(self, screenshot=False, name_prefix='unknown'):
    """
    This is different from _save_screenshot.
    The return value may be None or the screenshot path.

    Args:
        screenshot: bool or PIL image
    """
    if isinstance(screenshot, bool):
        if not screenshot:
            return
        return self._save_screenshot(name_prefix=name_prefix)

    if isinstance(screenshot, Image.Image):
        return self._save_screenshot(screen=screenshot, name_prefix=name_prefix)

    raise TypeError("invalid type for func _take_screenshot: " + str(type(screenshot)))
def assert_equal(self, v1, v2, **kwargs):  # , desc=None, screenshot=False, safe=False
    """ Check that v1 equals v2, and take a screenshot if they are not equal.

    Args:
        - desc (str): some description
        - safe (bool): will omit AssertionError if set to True
        - screenshot: can be type <None|True|False|PIL.Image>
    """
    is_success = v1 == v2
    if is_success:
        message = "assert equal success, %s == %s" % (v1, v2)
    else:
        message = '%s not equal %s' % (v1, v2)
    kwargs.update({
        'message': message,
        'success': is_success,
    })
    self._add_assert(**kwargs)
def test_strip(self):
    strip = imagestrip.ImageStrip("test", description="just a test")
    col = self.l8SR_col.filterBounds(self.pol_L8SR).filterDate(
        "2013-01-01", "2013-06-01")
    list_imgs = col.toList(10)
    viz_params = {'bands': ["B4", "B5", "B3"], 'min': 0, 'max': 5000}
    region = self.pol_L8SR.bounds().getInfo()["coordinates"]
    i = strip.from_collection([col], viz_param=viz_params, region=region,
                              name="test", folder="files", drawRegion=True,
                              zoom=2, properties=["CLOUD_COVER", "solar_zenith_angle"],
                              description="test")
    self.assertIsInstance(i, Image)
def _round_image(cls, image: PILImage.Image):
    if image.mode != 'RGBA':
        image = image.convert('RGBA')

    mask_size = image.size[0] * cls.ANTIALIAS_RATIO, image.size[1] * cls.ANTIALIAS_RATIO
    mask = PILImage.new('L', mask_size, color=0)
    mask_draw = cls._get_image_draw(mask)
    mask_draw.ellipse(((0, 0), mask.size), fill=255)
    mask = mask.resize(image.size, PILImage.LANCZOS)

    canvas = PILImage.new('RGBA', image.size, color=(0, 0, 0, 0))
    canvas.paste(image, mask=mask)
    return canvas
def draw_doge_meme(from_dir, to_dir, font_path, phrases):
    """
    Draw a doge meme, given an image path and text to draw on it.

    Args:
        from_dir (str): Directory of template doge image.
        to_dir (str): Path where to store result, including file name and extension.
        font_path (str): Directory of font to use.
        phrases (list[str]): Doge phrases to draw onto image.
    """
    image = Image.open(from_dir)
    texts = []
    for phrase in phrases:
        new_text = make_drawn_text(
            image, phrase, font_path, texts
        )
        texts.append(new_text)
    for text in texts:
        text.draw(image)
    image.save(to_dir)
def make_drawn_text(image, text, font_path, existing_texts):
    """
    Create DrawnText to draw on image.

    Args:
        image (Image.Image): Image to draw on.
        text (str): Text to draw.
        font_path (str): Directory of font to use.
        existing_texts (list[DrawnText]): List of already existing drawn texts.
    """
    width, height = image.size
    font = get_font(image, text, font_path, 0.3)
    color = random.choice(TEXT_COLORS)
    max_x = width - font.getsize(text)[0]
    max_y = height - font.getsize(text)[1]
    x, y = random.randint(0, max_x), random.randint(0, max_y)
    drawn_text = DrawnText(text, color, font, x, y)

    num_attempts = 0
    while any(drawn_text.intersects(txt) for txt in existing_texts):
        drawn_text.x = random.randint(0, max_x)
        drawn_text.y = random.randint(0, max_y)
        num_attempts += 1
        if num_attempts > 10:
            break
    return drawn_text
def get_font(image, text, font_path, img_width_fraction):
    """
    Get desired font for image.

    Args:
        image (Image.Image): Image being drawn on.
        text (str): Text being drawn.
        font_path (str): Path to font.
        img_width_fraction (float): Fraction of image's width that text's width should be.

    Returns:
        ImageFont.Font: Font to draw text with.
    """
    width, height = image.size
    font_size = 1
    font = ImageFont.truetype(font_path, font_size)
    # +1 is to ensure font size is below requirement
    while (font.getsize(text)[0] + 1) < img_width_fraction * width and font_size < MAX_FONT_SIZE:
        font_size += 1
        font = ImageFont.truetype(font_path, font_size)
    return font
def lab_array_to_image(images_array, normalized=True):
    # type: (numpy.ndarray, any) -> typing.List[Image.Image]
    images_array = images_array.transpose(0, 2, 3, 1)
    if normalized:
        images_array[:, :, :, 0] = images_array[:, :, :, 0] + 1
        images_array *= 50

    def lab2image(image_array):
        image_array = image_array.astype(dtype=numpy.float64)
        rgb = (lab2rgb(image_array) * 255).astype(numpy.uint8)
        image = Image.fromarray(rgb)
        return image

    images = [lab2image(image_array) for image_array in images_array]
    return images
def save_images(images, path_directory, prefix_filename):
    """ save image as [prefix_filename][index of image].png """
    # type: (typing.List[Image.Image], any, any) -> any
    if not os.path.exists(path_directory):
        os.mkdir(path_directory)

    filepath_list = []
    for i, image in enumerate(images):
        filename = prefix_filename + str(i) + '.png'
        filepath = os.path.join(path_directory, filename)
        image.save(filepath)
        filepath_list += [filepath]
    return filepath_list
def modify(self, function, *args, **kwargs):
    """ Modify the image object using the given Image function.
    This function supplies sequence support. """
    if not gif_support or not self.gif:
        self.object = function(self.object, *args, **kwargs)
    else:
        frames = []
        duration = self.object.info.get("duration") / 1000
        for frame in ImageSequence.Iterator(self.object):
            frame_bytes = utils.convert_image_object(function(frame, *args, **kwargs))
            frames.append(imageio.imread(frame_bytes, format="PNG"))

        # Save the image as bytes and recreate the image object
        image_bytes = imageio.mimwrite(imageio.RETURN_BYTES, frames, format=self.format, duration=duration)
        self.object = Image.open(BytesIO(image_bytes))
        self.gif_bytes = image_bytes
async def resize(message: discord.Message, image_arg: image, resolution: parse_resolution,
                 *options, extension: str.lower=None):
    """ Resize an image with the given resolution formatted as `<width>x<height>`
    or `*<scale>` with an optional extension. """
    if extension:
        image_arg.set_extension(extension)

    # Generate a new image based on the scale
    if resolution[1] == 0:
        w, h = image_arg.object.size
        scale = resolution[0]
        assert w * scale < 3000 and h * scale < 3000, \
            "**The result image must be less than 3000 pixels in each axis.**"
        resolution = (int(w * scale), int(h * scale))

    # Resize and upload the image
    image_arg.modify(Image.Image.resize, resolution,
                     Image.NEAREST if "-nearest" in options else Image.ANTIALIAS)
    await send_image(message, image_arg)
def from_text(cls, text, font, fg="black", bg=None, padding=0, max_width=None,
              line_spacing=0, align="left", tokenizer=whitespace_span_tokenize, hyphenator=None):
    """Create image from text. If max_width is set, uses the tokenizer and optional
    hyphenator to split text across multiple lines."""
    padding = Padding(padding)
    if bg is None:
        bg = ImageColor.getrgba(fg)._replace(alpha=0)
    if max_width is not None:
        text = ImageDraw.word_wrap(text, font, max_width, tokenizer, hyphenator)
    w, h = ImageDraw.textsize(text, font, spacing=line_spacing)
    if max_width is not None and w > max_width:
        logger.warning("Text cropped as too wide to fit: {}".format(text))
        w = max_width
    img = Image.new("RGBA", (w + padding.x, h + padding.y), bg)
    draw = ImageDraw.Draw(img)
    draw.text((padding.l, padding.u), text, font=font, fill=fg,
              spacing=line_spacing, align=align)
    return img
def from_pattern(cls, pattern, size, align=0, scale=(False, False),
                 preserve_aspect=False, resample=Image.LANCZOS):
    """Create an image using a background pattern, either scaled or tiled."""
    align = Alignment(align)
    img = Image.new("RGBA", size)
    if preserve_aspect:
        if scale[0] and scale[1]:
            raise ValueError("Cannot preserve aspect when scaling in both dimensions.")
        elif scale[0]:
            pattern = pattern.resize_fixed_aspect(width=size[0], resample=resample)
        elif scale[1]:
            pattern = pattern.resize_fixed_aspect(height=size[1], resample=resample)
    else:
        if scale[0]:
            pattern = pattern.resize((size[0], pattern.height), resample=resample)
        if scale[1]:
            pattern = pattern.resize((pattern.width, size[1]), resample=resample)
    xover = (pattern.width - size[0] % pattern.width) % pattern.width
    yover = (pattern.height - size[1] % pattern.height) % pattern.height
    for i in range(ceil(size[0] / pattern.width)):
        for j in range(ceil(size[1] / pattern.height)):
            x = int(i * pattern.width - xover * align.x)
            y = int(j * pattern.height - yover * align.y)
            img.overlay(pattern, (x, y))
    return img
def from_array(cls, array, xalign=0.5, yalign=0.5, padding=0, bg=0):
    """Create an image from an array of images."""
    if not non_string_iterable(xalign):
        xalign = [xalign] * max(len(r) for r in array)
    if not non_string_iterable(yalign):
        yalign = [yalign] * len(array)
    align = [[Alignment((xalign[c], yalign[r])) for c, _ in enumerate(row)]
             for r, row in enumerate(array)]
    padding = Padding(padding)
    heights = [max(img.height if img is not None else 0 for img in row) + padding.y
               for row in array]
    widths = [max(img.width if img is not None else 0 for img in column) + padding.x
              for column in zip_longest(*array)]
    aimg = Image.new("RGBA", (sum(widths), sum(heights)), bg)
    for r, row in enumerate(array):
        for c, img in enumerate(row):
            if img is None:
                continue
            x = sum(widths[0:c]) + padding.l + int(align[r][c].x * (widths[c] - (img.width + padding.x)))
            y = sum(heights[0:r]) + padding.u + int(align[r][c].y * (heights[r] - (img.height + padding.y)))
            aimg.overlay(img, (x, y))
    return aimg
def pad_to_aspect(self, aspect, divisor=1, align=0.5, bg="black", offsets=None):
    """Return a padded image with the given aspect ratio. Updates optional offset structure."""
    if aspect == self.width == 0 or divisor == self.height == 0:
        return self
    align = Alignment(align)
    if self.width * divisor > self.height * aspect:
        newwidth = self.width
        newheight = int(self.width * (divisor / aspect))
    else:
        newwidth = int(self.height * (aspect / divisor))
        newheight = self.height
    img = Image.new("RGBA", (newwidth, newheight), bg)
    x = int(align.x * (newwidth - self.width))
    y = int(align.y * (newheight - self.height))
    if offsets is not None:
        padding = Padding((x, y, img.width - self.width - x, img.height - self.height - y))
        offsets.update(offsets + padding)
    return img.overlay(self, (x, y), None)
def __new__(cls, size, fg="black", bg=None, antialias=4, invert=False, **kwargs):
    """Generate an image of the appropriate shape. See mask method for additional
    shape-specific parameters.
    - size (int/(int,int)): image size
    - fg (color/pattern): image foreground [black]
    - bg (color/pattern): image background [None]
    - antialias (x>0): level of antialiasing (if supported), where 1.0 is none [4.0]
    - invert (boolean): whether to invert the shape mask [False]
    """
    if isinstance(size, Integral):
        size = (size, size)
    if bg is None:
        bg = ImageColor.getrgba(fg)._replace(alpha=0)
    if cls.antialiasing:
        orig_size, size = size, [round(s * antialias) for s in size]
        if isinstance(bg, Image.Image):
            bg = bg.resize([round(s * antialias) for s in bg.size], Image.NEAREST)
        if isinstance(fg, Image.Image):
            fg = fg.resize([round(s * antialias) for s in fg.size], Image.NEAREST)
    mask = cls.mask(size, **kwargs)
    if invert:
        mask = mask.invert_mask()
    base = Image.from_pattern(bg, mask.size) if isinstance(bg, Image.Image) else Image.new("RGBA", mask.size, bg)
    fore = Image.from_pattern(fg, mask.size) if isinstance(fg, Image.Image) else Image.new("RGBA", mask.size, fg)
    img = base.overlay(fore, mask=mask)
    if cls.antialiasing:
        img = img.resize(orig_size, resample=Image.LANCZOS if antialias > 1 else Image.NEAREST)
    return img
def update(self, pil_image: Image.Image):
    """Update the texture to contain the provided image.

    Args:
        pil_image (PIL.Image.Image): The image to write into the texture.
    """
    # Ensure the image is in RGBA format and convert to the raw RGBA bytes.
    image_width, image_height = pil_image.size
    image = pil_image.convert("RGBA").tobytes("raw", "RGBA")

    # Bind the texture so that it can be modified.
    self.bind()

    if (self._width == image_width) and (self._height == image_height):
        # Same size - just need to update the texels.
        glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, image_width, image_height,
                        GL_RGBA, GL_UNSIGNED_BYTE, image)
    else:
        # Different size than the last frame (e.g. the Window is resizing)
        # Create a new texture of the correct size.
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, image_width, image_height, 0,
                     GL_RGBA, GL_UNSIGNED_BYTE, image)
        self._width = image_width
        self._height = image_height
def test_getitem(self, mock):
    # prepare mock.
    shape = (256, 256)
    mock.side_effect = [Image.new('RGB', shape), Image.new('RGB', shape)]
    # test.
    image, pose, visibility = self.dataset[0]
    eq_(type(image), Image.Image)
    eq_(image.size, (256, 256))
    eq_(image.mode, 'RGB')
    eq_(type(pose), torch.FloatTensor)
    ok_((pose == torch.Tensor([[108, 50], [148, 180], [148, 180]])).all())
    eq_(type(visibility), torch.FloatTensor)
    ok_((visibility == torch.Tensor([[1, 1], [0, 0], [1, 1]])).all())
    image, pose, visibility = self.dataset[1]
    eq_(type(image), Image.Image)
    eq_(image.size, (256, 256))
    eq_(image.mode, 'RGB')
    eq_(type(pose), torch.FloatTensor)
    ok_((pose == torch.Tensor([[40, 50], [160, 180], [160, 180]])).all())
    eq_(type(visibility), torch.FloatTensor)
    ok_((visibility == torch.Tensor([[1, 1], [1, 1], [0, 0]])).all())
def test_getitem(self, mock):
    # prepare mock.
    shape = (256, 256)
    mock.side_effect = [Image.new('RGB', shape), Image.new('RGB', shape)]
    # test.
    image, pose, visibility = self.dataset[0]
    eq_(type(image), torch.FloatTensor)
    eq_(image.size(), (3, 227, 227))
    eq_(type(pose), torch.FloatTensor)
    ok_(torch.dist(pose, torch.Tensor([[94, 49], [134, 179], [134, 179]])/227) < 1.e-5)
    eq_(type(visibility), torch.FloatTensor)
    ok_((visibility == torch.Tensor([[1, 1], [0, 0], [1, 1]])).all())
    image, pose, visibility = self.dataset[1]
    eq_(type(image), torch.FloatTensor)
    eq_(image.size(), (3, 227, 227))
    ok_(torch.dist(pose, torch.Tensor([[40, 49], [160, 179], [160, 179]])/227) < 1.e-5)
    eq_(type(visibility), torch.FloatTensor)
    ok_((visibility == torch.Tensor([[1, 1], [1, 1], [0, 0]])).all())
def test_getitem(self, mock):
    # prepare mock.
    shape = (256, 256)
    mock.side_effect = [Image.new('RGB', shape), Image.new('RGB', shape)]
    # test.
    for i in range(len(self.dataset)):
        image, pose, visibility = self.dataset[i]
        eq_(type(image), torch.FloatTensor)
        eq_(image.size(), (3, 227, 227))
        ok_((image >= 0).all())
        ok_((image <= 1).all())
        eq_(type(pose), torch.FloatTensor)
        ok_((pose >= 0).all())
        ok_((pose <= 1).all())
        eq_(type(visibility), torch.FloatTensor)
        eq_(visibility.size(), (3, 2))
        for j in range(3):
            eq_(visibility[j].sum(), visibility[j, 0]*2)
def getImageDescriptor(im):
    """ Used for the local color table properties per image.
    Otherwise the global color table applies to all frames, irrespective of
    whether additional colors come into play that would require a redefined
    palette. Still a maximum of 256 colors per frame, obviously.

    Written by Ant1 on 2010-08-22
    """
    bb = '\x2C'                 # Image separator,
    bb += intToBin(0)           # Left position
    bb += intToBin(0)           # Top position
    bb += intToBin(im.size[0])  # image width
    bb += intToBin(im.size[1])  # image height
    # packed field: local color table flag1, interlace0, sorted table0,
    # reserved00, lct size111=7=2^(7+1)=256.
    bb += '\x87'
    # LZW minimum size code now comes later, beginning of [image data] blocks
    return bb

#def getAppExt(loops=float('inf')):  # compile error, commented by zcwang
def __init__(self, image, samplefac=10, colors=256):

    # Check Numpy
    if np is None:
        raise RuntimeError("Need Numpy for the NeuQuant algorithm.")

    # Check image
    if image.size[0] * image.size[1] < NeuQuant.MAXPRIME:
        raise IOError("Image is too small")
    assert image.mode == "RGBA"

    # Initialize
    self.setconstants(samplefac, colors)
    self.pixels = np.fromstring(image.tostring(), np.uint32)
    self.setUpArrays()

    self.learn()
    self.fix()
    self.inxbuild()
def quantize_without_scipy(self, image):
    """ This function can be used if no scipy is available.
    It's 7 times slower though.
    """
    w, h = image.size
    px = np.asarray(image).copy()
    memo = {}
    for j in range(w):
        for i in range(h):
            key = (px[i, j, 0], px[i, j, 1], px[i, j, 2])
            try:
                val = memo[key]
            except KeyError:
                val = self.convert(key)
                memo[key] = val
            px[i, j, 0], px[i, j, 1], px[i, j, 2] = val
    return Image.fromarray(px).convert("RGB").quantize(palette=self.paletteImage())
def _rescale_or_crop(image: Image.Image, pad_w: int, pad_h: int,
                     rescale_w: bool, rescale_h: bool,
                     keep_aspect_ratio: bool) -> Image.Image:
    """Rescale and/or crop the image based on the rescale configuration."""
    orig_w, orig_h = image.size
    if orig_w == pad_w and orig_h == pad_h:
        return image

    if rescale_w and rescale_h and not keep_aspect_ratio:
        image.thumbnail((pad_w, pad_h))
    elif rescale_w and rescale_h and keep_aspect_ratio:
        ratio = min(pad_h / orig_h, pad_w / orig_w)
        image.thumbnail((int(orig_w * ratio), int(orig_h * ratio)))
    elif rescale_w and not rescale_h:
        orig_w, orig_h = image.size
        if orig_w != pad_w:
            ratio = pad_w / orig_w
            image.thumbnail((pad_w, int(orig_h * ratio)))
    elif rescale_h and not rescale_w:
        orig_w, orig_h = image.size
        if orig_h != pad_h:
            ratio = pad_h / orig_h
            image.thumbnail((int(orig_w * ratio), pad_h))

    return _crop(image, pad_w, pad_h)
def test_basic_service():
    service = Service(
        code=40215,
        id=104707,
        description="SEDEX 10",
        category="SERVICO_COM_RESTRICAO",
        symbol="premium",
        max_weight=10000,
    )
    assert service.id == 104707
    assert service.code == '40215'
    assert service.display_name == "SEDEX 10"
    assert service.description == "SEDEX 10"
    assert service.category == "SERVICO_COM_RESTRICAO"
    assert service.get_symbol_filename().endswith("/premium.gif")
    assert service.get_symbol_filename("png").endswith("/premium.png")
    assert isinstance(service.symbol_image, Image)
    assert service.max_weight == 10000
def __init__(self, custom_id: int, logo: Optional[Union[str, Image.Image]] = None) -> None:
    # will be filled by close_posting_list
    self.number = None  # type: Optional[int]

    if logo is None:
        logo = str(get_resource_path("carrier_logo.png"))
    if isinstance(logo, str):
        logo = Image.open(logo)
    self.logo = logo

    self.custom_id = custom_id
    self.shipping_labels = {}  # type: Dict[str, ShippingLabel]

    # filled by the first shipping label
    self.initial_shipping_label = None  # type: Optional[ShippingLabel]
    self.posting_card = None  # type: PostingCard
    self.contract = None  # type: Contract
    self.sender = None  # type: Address
def image_to_bitmap(self, image):
    if isinstance(image, Image.Image):
        img = image.convert('L')
    else:
        img = Image.open(image).convert('L')
    pixels = img.load()
    width, height = img.size
    bitmap = []
    for x in range(width):
        col_byte = 0x00
        for y in range(height):
            if pixels[x, y] > 127:
                col_byte += 1 << (8 - y % 8 - 1)
            if (y + 1) % 8 == 0:
                bitmap.append(col_byte)
                col_byte = 0x00
    return bitmap
def binary_clock(self, block_width=3, block_height=3, block_spacing_x=1, block_spacing_y=1, **kwargs):
    width = 6 * block_width + 5 * block_spacing_x
    height = 2 * block_height + block_spacing_y
    img = Image.new('RGBA', (width, height), 'black')
    draw = ImageDraw.Draw(img)
    now = datetime.datetime.now()
    hour_bits = [now.hour >> i & 1 for i in range(7, -1, -1)][-6:]
    minute_bits = [now.minute >> i & 1 for i in range(7, -1, -1)][-6:]
    y = 0
    for pos, bit in enumerate(hour_bits):
        x = pos * (block_width + block_spacing_x)
        draw.rectangle((x, y, x + block_width - 1, y + block_height - 1),
                       outline='white', fill='white' if bit else 'black')
    y = block_height + block_spacing_y
    for pos, bit in enumerate(minute_bits):
        x = pos * (block_width + block_spacing_x)
        draw.rectangle((x, y, x + block_width - 1, y + block_height - 1),
                       outline='white', fill='white' if bit else 'black')
    self.bitmap(img, **kwargs)
def text(self, text, font=None, size=20, color='white', timestring=False, **kwargs):
    font = font or self.DEFAULT_FONT
    if timestring:
        text = time.strftime(text)
    textfont, truetype = self.get_imagefont(font, size)
    approx_tsize = textfont.getsize(text)
    text_img = Image.new('RGBA', approx_tsize, (0, 0, 0, 0))
    text_draw = ImageDraw.Draw(text_img)
    text_draw.fontmode = "1"
    text_draw.text((0, 0), text, color, font=textfont)
    if truetype:
        # font.getsize is inaccurate on non-pixel fonts
        text_img = text_img.crop(text_img.getbbox())
    else:
        # only crop horizontally with pixel fonts
        bbox = text_img.getbbox()
        text_img = text_img.crop((bbox[0], 0, bbox[2], text_img.size[1]))
    self.bitmap(text_img, **kwargs)
def get_cKDTree():
    try:
        from scipy.spatial import cKDTree
    except ImportError:
        cKDTree = None
    return cKDTree


# getheader gives a 87a header and a color palette (two elements in a list).
# getdata()[0] gives the Image Descriptor up to (including) "LZW min code size".
# getdata()[1:] is the image data itself in chunks of 256 bytes (well,
# technically the first byte says how many bytes follow, after which that
# amount (max 255) follows).
def checkImages(images):
    """ checkImages(images)

    Check numpy images and correct intensity range etc.
    The same for all movie formats.
    """
    # Init results
    images2 = []

    for im in images:
        if PIL and isinstance(im, PIL.Image.Image):
            # We assume PIL images are all right
            images2.append(im)
        elif np and isinstance(im, np.ndarray):
            # Check and convert dtype
            if im.dtype == np.uint8:
                images2.append(im)  # Ok
            elif im.dtype in [np.float32, np.float64]:
                im = im.copy()
                im[im < 0] = 0
                im[im > 1] = 1
                im *= 255
                images2.append(im.astype(np.uint8))
            else:
                im = im.astype(np.uint8)
                images2.append(im)
            # Check size
            if im.ndim == 2:
                pass  # ok
            elif im.ndim == 3:
                if im.shape[2] not in [3, 4]:
                    raise ValueError('This array can not represent an image.')
            else:
                raise ValueError('This array can not represent an image.')
        else:
            raise ValueError('Invalid image type: ' + str(type(im)))

    # Done
    return images2
def getImageDescriptor(self, im, xy=None):
    """ getImageDescriptor(im, xy=None)

    Used for the local color table properties per image.
    Otherwise the global color table applies to all frames, irrespective of
    whether additional colors come into play that would require a redefined
    palette. Still a maximum of 256 colors per frame, obviously.

    Written by Ant1 on 2010-08-22
    Modified by Alex Robinson in January 2011 to implement subrectangles.
    """

    # Default: use full image and place at upper left
    if xy is None:
        xy = (0, 0)

    # Image separator,
    bb = '\x2C'

    # Image position and size
    bb += intToBin(xy[0])       # Left position
    bb += intToBin(xy[1])       # Top position
    bb += intToBin(im.size[0])  # image width
    bb += intToBin(im.size[1])  # image height

    # packed field: local color table flag1, interlace0, sorted table0,
    # reserved00, lct size111=7=2^(7+1)=256.
    bb += '\x87'

    # LZW minimum size code now comes later, beginning of [image data] blocks
    return bb
def paletteImage(self):
    """ PIL weird interface for making a paletted image: create an image which
    already has the palette, and use that in Image.quantize. This function
    returns this palette image. """
    if self.pimage is None:
        palette = []
        for i in range(self.NETSIZE):
            palette.extend(self.colormap[i][:3])
        palette.extend([0] * (256 - self.NETSIZE) * 3)

        # a palette image to use for quant
        self.pimage = Image.new("P", (1, 1), 0)
        self.pimage.putpalette(palette)
    return self.pimage
def quantize_with_scipy(self, image):
    w, h = image.size
    px = np.asarray(image).copy()
    px2 = px[:, :, :3].reshape((w * h, 3))

    cKDTree = get_cKDTree()
    kdtree = cKDTree(self.colormap[:, :3], leafsize=10)
    result = kdtree.query(px2)
    colorindex = result[1]
    print("Distance: %1.2f" % (result[0].sum() / (w * h)))
    px2[:] = self.colormap[colorindex, :3]

    return Image.fromarray(px).convert("RGB").quantize(palette=self.paletteImage())
def _create_map_image(self):
    images = self._get_images()
    self._map_image = Image.new('RGBA', self._map_size)
    for img in images:
        if not isinstance(img, Image.Image):
            img = Image.open(BytesIO(img.content)).convert('RGBA')
        self._map_image = Image.alpha_composite(self._map_image, img)
def raise_ioerror(error):
    try:
        message = Image.core.getcodecstatus(error)
    except AttributeError:
        message = ERRORS.get(error)
    if not message:
        message = "decoder error %d" % error
    raise IOError(message + " when reading image file")


#
# --------------------------------------------------------------------
# Helpers
def __init__(self, fp=None, filename=None):
    Image.Image.__init__(self)

    self.tile = None
    self.readonly = 1  # until we know better

    self.decoderconfig = ()
    self.decodermaxblock = MAXBLOCK

    if isPath(fp):
        # filename
        self.fp = open(fp, "rb")
        self.filename = fp
    else:
        # stream
        self.fp = fp
        self.filename = filename

    try:
        self._open()
    except (IndexError,     # end of data
            TypeError,      # end of data (ord)
            KeyError,       # unsupported mode
            EOFError,       # got header but not the first frame
            struct.error) as v:
        raise SyntaxError(v)

    if not self.mode or self.size[0] <= 0:
        raise SyntaxError("not identified by this driver")
def load_prepare(self):
    # create image memory if necessary
    if not self.im or \
       self.im.mode != self.mode or self.im.size != self.size:
        self.im = Image.core.new(self.mode, self.size)
    # create palette (optional)
    if self.mode == "P":
        Image.Image.load(self)
def close(self):
    """
    (Consumer) Close the stream.

    :returns: An image object.
    :exception IOError: If the parser failed to parse the image file either
                        because it cannot be identified or cannot be decoded.
    """
    # finish decoding
    if self.decoder:
        # get rid of what's left in the buffers
        self.feed(b"")
        self.data = self.decoder = None
        if not self.finished:
            raise IOError("image was incomplete")
    if not self.image:
        raise IOError("cannot parse this image")
    if self.data:
        # incremental parsing not possible; reopen the file
        # now that we have all data
        try:
            fp = io.BytesIO(self.data)
            self.image = Image.open(fp)
        finally:
            self.image.load()
            fp.close()  # explicitly close the virtual file
    return self.image


# --------------------------------------------------------------------