The following 8 code examples, extracted from open source Python projects, illustrate how to use PIL.Image.eval().
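Image.eval(image, func) builds a new image by mapping func over every pixel value of every band of image (for 8-bit modes the values are integers in 0-255). As a quick orientation before the project examples, here is a minimal sketch that inverts a grayscale image; the file name example.png is a hypothetical placeholder.

from PIL import Image

# Hypothetical input file; any image that converts to 8-bit grayscale works.
img = Image.open('example.png').convert('L')

# func receives a single band value (0-255) and returns the new value.
inverted = Image.eval(img, lambda v: 255 - v)
inverted.save('example_inverted.png')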
def save_target_image(self, source, name, x_offset, y_offset, x_size, y_size, flip, convert):
    m_img = Image.open(source)
    if x_size != 0 and y_size != 0:
        m_img = m_img.crop((x_offset, y_offset, x_offset + x_size, y_offset + y_size))
    if flip is True:
        m_img = m_img.transpose(Image.FLIP_LEFT_RIGHT)
    if convert is True:
        m_img.load()
        alpha = m_img.split()[-1]
        m_img = m_img.convert('RGB').convert('P', palette=Image.ADAPTIVE, colors=255)
        mask = Image.eval(alpha, lambda a: 255 if a <= 128 else 0)
        m_img.paste(255, mask)
        m_img.save(join(self.emotedb_path, name) + ".png", transparency=255, optimize=True)
    else:
        m_img.save(join(self.emotedb_path, name) + ".png", optimize=True)
def save_target_image(self, source, name, x_offset, y_offset, x_size, y_size, flip, convert):
    m_img = Image.open(source)
    if x_size != 0 and y_size != 0:
        m_img = m_img.crop((x_offset, y_offset, x_offset + x_size, y_offset + y_size))
    if flip is True:
        m_img = m_img.transpose(Image.FLIP_LEFT_RIGHT)
    if convert is True:
        m_img.load()
        alpha = m_img.split()[-1]
        m_img = m_img.convert('RGB').convert('P', palette=Image.ADAPTIVE, colors=255)
        mask = Image.eval(alpha, lambda a: 255 if a <= 128 else 0)
        m_img.paste(255, mask)
        m_img.save(join(self.emotes_path, name) + ".png", transparency=255, optimize=True)
    else:
        m_img.save(join(self.emotes_path, name) + ".png", optimize=True)
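Both save_target_image variants rely on the same idiom: threshold the alpha band with Image.eval to get a binary mask, paste palette index 255 through that mask, then declare index 255 as transparent when saving. A standalone sketch of that idiom, assuming an RGBA source file sprite.png (hypothetical name):

from PIL import Image

m_img = Image.open('sprite.png')  # assumed to be RGBA
alpha = m_img.split()[-1]         # alpha band

# Quantize to 255 colors so palette index 255 stays free for transparency.
m_img = m_img.convert('RGB').convert('P', palette=Image.ADAPTIVE, colors=255)

# The mask is 255 wherever the pixel should become transparent (alpha <= 128).
mask = Image.eval(alpha, lambda a: 255 if a <= 128 else 0)
m_img.paste(255, mask)            # write index 255 through the mask
m_img.save('sprite_indexed.png', transparency=255, optimize=True)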
def test1():
    filename = '../ETL8G/ETL8G_01'
    id_record = 0
    with open(filename, 'rb') as f:
        f.seek(id_record * sz_record)
        r = read_record_ETL8G(f)
    print(r[0:-2], hex(r[1]))
    iE = Image.eval(r[-1], lambda x: 255 - x * 16)
    fn = '../tmp/ETL8G_{:d}_{:s}.png'.format((r[0] - 1) % 20 + 1, hex(r[1])[-4:])
    iE.save(fn, 'PNG')
def test2():
    filename = '../ETL8G/ETL8G_01'
    id_dataset = 0
    new_img = Image.new('L', (128 * 32, 128 * 30))
    with open(filename, 'rb') as f:
        f.seek(id_dataset * 956 * sz_record)
        for i in range(956):
            r = read_record_ETL8G(f)
            new_img.paste(r[-1], (128 * (i % 32), 128 * (i // 32)))
    iE = Image.eval(new_img, lambda x: 255 - x * 16)
    fn = '../tmp/ETL8G_ds{:03d}.png'.format(id_dataset)
    iE.save(fn, 'PNG')
def dump_all():
    for j in range(1, 33):
        for id_dataset in range(5):
            new_img = Image.new('L', (128 * 32, 128 * 30))
            filename = '../ETL8G/ETL8G_{:02d}'.format(j)
            with open(filename, 'rb') as f:
                f.seek(id_dataset * 956 * sz_record)
                for i in range(956):
                    r = read_record_ETL8G(f)
                    new_img.paste(r[-1], (128 * (i % 32), 128 * (i // 32)))
            iE = Image.eval(new_img, lambda x: 255 - x * 16)
            fn = '../tmp/ETL8G_ds{:02d}_{:01d}.png'.format(j, id_dataset)
            iE.save(fn, 'PNG')
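The three ETL8G examples above decode 16-level (0-15) grayscale character scans and use Image.eval(..., lambda x: 255 - x * 16) to stretch that range to 8 bits and invert it in a single pass. read_record_ETL8G and sz_record are project-specific helpers; a sketch of just the rescale-and-invert step, run on a synthetic 'L' image instead:

from PIL import Image

# Synthetic stand-in for one decoded glyph: an 'L' image whose
# pixel values only use the 0..15 range.
glyph = Image.new('L', (128, 128))
glyph.putdata([i % 16 for i in range(128 * 128)])

# Stretch 0..15 up to 0..255 and invert in one lookup.
iE = Image.eval(glyph, lambda x: 255 - x * 16)
iE.save('glyph_preview.png', 'PNG')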
def combine_pixels(self, p1, p2, alpha=0.5):
    return tuple([round(alpha * p1[i] + (1 - alpha) * p2[i]) for i in range(3)])

# The use of Image.eval applies the func to each BAND, independently, if image pixels are RGB tuples.
def map_image(self, func, image=False):
    # "Apply func to each pixel of the image, returning a new image"
    image = image if image else self.image
    # Eval creates a new image, so there is no need to make a copy here.
    # Note: as the comment on combine_pixels above says, Image.eval applies func to each
    # band value independently; for an RGB image, func receives single 0-255 values for
    # R, G and B in turn, not a 3-tuple.
    return Imager(image=Image.eval(image, func))
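To make the band-wise behavior concrete, a small sketch: the lambda is called on individual band values, never on (R, G, B) tuples, and each band is mapped through it independently.

from PIL import Image

rgb = Image.new('RGB', (2, 2), (10, 100, 200))

# Each of the R, G and B bands is passed through the lambda separately.
brighter = Image.eval(rgb, lambda v: min(255, v + 50))
print(brighter.getpixel((0, 0)))  # (60, 150, 250)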
def convertImagesToPIL(self, images, dither, nq=0, images_info=None):
    """ convertImagesToPIL(images, nq=0)

    Convert images to Paletted PIL images, which can then be
    written to a single animated GIF.
    """

    # Convert to PIL images
    images2 = []
    for im in images:
        if isinstance(im, Image.Image):
            images2.append(im)
        elif np and isinstance(im, np.ndarray):
            if im.ndim == 3 and im.shape[2] == 3:
                im = Image.fromarray(im, 'RGB')
            elif im.ndim == 3 and im.shape[2] == 4:
                # im = Image.fromarray(im[:,:,:3],'RGB')
                self.transparency = True
                im = Image.fromarray(im[:, :, :4], 'RGBA')
            elif im.ndim == 2:
                im = Image.fromarray(im, 'L')
            images2.append(im)

    # Convert to paletted PIL images
    images, images2 = images2, []
    if nq >= 1:
        # NeuQuant algorithm
        for im in images:
            im = im.convert("RGBA")  # NQ assumes RGBA
            nqInstance = NeuQuant(im, int(nq))  # Learn colors from image
            if dither:
                im = im.convert("RGB").quantize(palette=nqInstance.paletteImage(), colors=255)
            else:
                # Use to quantize the image itself
                im = nqInstance.quantize(im, colors=255)
            self.transparency = True  # since NQ assumes transparency
            if self.transparency:
                alpha = im.split()[3]
                mask = Image.eval(alpha, lambda a: 255 if a <= 128 else 0)
                im.paste(255, mask=mask)
            images2.append(im)
    else:
        # Adaptive PIL algorithm
        AD = Image.ADAPTIVE
        # for index, im in enumerate(images):
        for i in range(len(images)):
            im = images[i].convert('RGB').convert('P', palette=AD, dither=dither, colors=255)
            if self.transparency:
                alpha = images[i].split()[3]
                mask = Image.eval(alpha, lambda a: 255 if a <= 128 else 0)
                im.paste(255, mask=mask)
            images2.append(im)

    # Done
    return images2
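This writer quantizes to 255 colors so that palette index 255 stays free, then uses the Image.eval mask to stamp that index wherever alpha <= 128. The surrounding class emits the GIF bytes itself; as a hedged sketch under that same convention, frames prepared this way can also be handed to Pillow's built-in GIF writer, which accepts the reserved index through its transparency argument:

from PIL import Image

# Build two tiny 'P'-mode frames the way convertImagesToPIL does:
# quantize to 255 colors and reserve index 255 for transparent pixels.
frames = []
for color in ((255, 0, 0, 255), (0, 0, 255, 0)):
    rgba = Image.new('RGBA', (16, 16), color)
    alpha = rgba.split()[3]
    p = rgba.convert('RGB').convert('P', palette=Image.ADAPTIVE, colors=255)
    mask = Image.eval(alpha, lambda a: 255 if a <= 128 else 0)
    p.paste(255, mask=mask)
    frames.append(p)

frames[0].save(
    'animation.gif',
    save_all=True,
    append_images=frames[1:],
    transparency=255,  # the index pasted through the Image.eval mask
    duration=100,      # milliseconds per frame
    loop=0,
    disposal=2,
)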