The following 34 code examples, extracted from open-source Python projects, illustrate how to use PIL.Image.merge().
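Before the project examples, here is a minimal sketch of the call itself: Image.merge(mode, bands) takes a target mode and a sequence of equally sized single-band images and returns one multi-band image. The sizes and fill values below are arbitrary, chosen only for illustration.

from PIL import Image

# Three single-band ("L") images serve as the R, G and B channels.
red = Image.new("L", (64, 64), 255)    # full-intensity channel
green = Image.new("L", (64, 64), 128)  # half-intensity channel
blue = Image.new("L", (64, 64), 0)     # empty channel

rgb = Image.merge("RGB", (red, green, blue))
print(rgb.mode, rgb.size)  # -> RGB (64, 64)

# The idiom that recurs throughout the examples below:
# split an image into bands, reorder or edit them, and merge back.
r, g, b = rgb.split()
bgr = Image.merge("RGB", (b, g, r))  # channel-swapped copy

Most of the examples that follow use exactly this split/edit/merge pattern, differing only in which bands they modify and in which mode (RGB, RGBA, HSV, YCbCr) they merge back.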
def GetImageBody(img, compressed=0):
    color = (0, 0, 0)
    if img.mode == "RGB":
        background = img
    elif img.mode == "RGBA":
        background = Image.new("RGB", img.size, color)
        img.load()
        background.paste(img, mask=img.split()[3])  # alpha channel
    elif img.mode == "P" or img.mode == "L":
        background = Image.new("RGB", img.size, color)
        img.load()
        background.paste(img)
        # background.save("splash.png")
    else:
        print("sorry, can't support this format")
        sys.exit()
    if compressed == 1:
        return encodeRLE24(background)
    else:
        r, g, b = background.split()
        return Image.merge("RGB", (b, g, r)).tobytes()

## make a image
def normalize2RGB(self, *args, _image=None, _step=None):
    if _image is None:
        step = {'op': self.normalize2RGB}
        self.steps.append(step)
        return self
    else:
        if _image.mode == "RGB":
            pass
        elif _image.mode == "L":
            _image = Image.merge("RGB", (_image, _image, _image))
        elif _image.mode == "CMYK":
            _image = _image.convert('RGB')
        elif _image.mode == "RGBA":
            _image = _image.convert('RGB')
        else:
            raise NotImplementedError('Unknown image format. {}'.format(_image.mode))
        _image.load()
        return _image
def lsb_encode(data, image):
    bytes_io = BytesIO()
    dump(data, file=bytes_io)
    data_bytes = bytes_io.getvalue()
    data_bytes_array = np.fromiter(data_bytes, dtype=np.uint8)
    data_bits_list = np.unpackbits(data_bytes_array).tolist()
    data_bits_list += [0] * (image.size[0] * image.size[1] - len(data_bits_list))
    watermark = Image.frombytes(data=bytes(data_bits_list), size=image.size, mode='L')
    red, green, blue = image.split()
    watermarked_red = ImageMath.eval("convert(a&0xFE|b&0x1,'L')", a=red, b=watermark)
    watermarked_image = Image.merge("RGB", (watermarked_red, green, blue))
    return watermarked_image
def load_image(fd):
    d = Asset(fd)
    tex = [i for i in d.objs if "image data" in i]
    assert len(tex) == 1
    tex = tex[0]
    data = tex["image data"]
    width, height, fmt = tex["m_Width"], tex["m_Height"], tex["m_TextureFormat"]
    if fmt == 7:  # BGR565
        im = Image.frombytes("RGB", (width, height), data, "raw", "BGR;16")
    elif fmt == 13:  # ABGR4444
        im = Image.frombytes("RGBA", (width, height), data, "raw", "RGBA;4B")
        r, g, b, a = im.split()
        im = Image.merge("RGBA", (a, b, g, r))
    else:
        raise Exception("Unsupported format %d" % fmt)
    im = im.transpose(Image.FLIP_TOP_BOTTOM)
    return im
def distort_image(im, hue, sat, val):
    im = im.convert('HSV')
    cs = list(im.split())
    cs[1] = cs[1].point(lambda i: i * sat)
    cs[2] = cs[2].point(lambda i: i * val)

    def change_hue(x):
        x += hue * 255
        if x > 255:
            x -= 255
        if x < 0:
            x += 255
        return x

    cs[0] = cs[0].point(change_hue)
    im = Image.merge(im.mode, tuple(cs))
    im = im.convert('RGB')
    # constrain_image(im)
    return im
def HSVColor(img):
    if isinstance(img, Image.Image):
        r, g, b = img.split()
        Hdat = []
        Sdat = []
        Vdat = []
        for rd, gn, bl in zip(r.getdata(), g.getdata(), b.getdata()):
            h, s, v = colorsys.rgb_to_hsv(rd / 255., gn / 255., bl / 255.)
            Hdat.append(int(h * 255.))
            Sdat.append(int(s * 255.))
            Vdat.append(int(v * 255.))
        r.putdata(Hdat)
        g.putdata(Sdat)
        b.putdata(Vdat)
        return Image.merge('RGB', (r, g, b))
    else:
        return None
def pillow_invert_channels(img):
    r, g, b = img.split()
    img = PILImage.merge("RGB", (b, g, r))
    return img
def do_merge(self):
    """usage: merge <string:mode> <image:pic1> [<image:pic2> [<image:pic3> [<image:pic4>]]]

    Merge top-of-stack images in a way described by the mode.
    """
    mode = self.do_pop()
    bandlist = []
    for band in mode:
        bandlist.append(self.do_pop())
    self.push(Image.merge(mode, bandlist))

# Image class methods
def original_colors(original, stylized):
    h, s, v = original.convert('HSV').split()
    hs, ss, vs = stylized.convert('HSV').split()
    return Image.merge('HSV', (h, s, vs)).convert('RGB')
def drawImage(self, screen, coord, data):
    try:
        r, g, b, a = data.split()
        fg = Image.merge("RGB", (r, g, b))
        mask = Image.merge("L", (a,))
        screen.paste(fg, coord, mask)
    except:
        pass
def brightness(scale):
    def f(img):
        if img.mode != 'HSV':
            img = img.convert('HSV')
        h, s, v = img.split()
        v = v.point(lambda x: x * scale)
        return Image.merge('HSV', [h, s, v])
    return f

# Gamma transformation of an image such that
#   I' = MAX * (I/255)^r
def gamma(r):
    # TAONOTE: the following suffers from ZeroDivisionError: 0.0 cannot be raised to a negative power
    g = lambda x: 255.0 * (x / 255.0) ** r

    def f(img):
        if img.mode != 'RGB':
            return img.point(g)
        else:
            # Presumably only HSV
            h, s, v = img.split()
            v = v.point(g)
            return Image.merge('HSV', [h, s, v])
    return f

# Adjust image level, such that
#   I' = a*I + c
def hue(deg):
    def f(img):
        if img.mode != 'HSV':
            img = img.convert('HSV')
        h, s, v = img.split()
        h = h.point(lambda x: x + deg)
        return Image.merge('HSV', [h, s, v])
    return f

# Adjust the saturation of an image, such that
#   s' = s * scale
def sat(scale):
    def f(img):
        if img.mode != 'HSV':
            img = img.convert('HSV')
        h, s, v = img.split()
        s = s.point(lambda x: x * scale)
        return Image.merge('HSV', [h, s, v])
    return f
def PngToBmp(f):
    # convert a .png image file to a .bmp image file using PIL
    file_in = f + ".png"
    img = Image.open(file_in)
    file_out = f + ".bmp"
    # print len(img.split())  # test
    if len(img.split()) == 4:
        # prevent IOError: cannot write mode RGBA as BMP
        r, g, b, a = img.split()
        img = Image.merge("RGB", (r, g, b))
        img.save(file_out)
    else:
        img.save(file_out)
def jpg2bmp(in_path, out_path):
    from PIL import Image
    import os
    bmp = Image.open(in_path)
    r, g, b = bmp.split()
    img = Image.merge("RGB", (r, g, b))
    img.save(out_path)
    os.remove(in_path)
    return out_path
def scale_image_channel(im, c, v):
    cs = list(im.split())
    cs[c] = cs[c].point(lambda i: i * v)
    out = Image.merge(im.mode, tuple(cs))
    return out
def inverse(inpng, outpng):
    image = Image.open(inpng)
    if image.mode == 'RGBA':
        r, g, b, a = image.split()
        rgb_image = Image.merge('RGB', (r, g, b))
        inverted_image = PIL.ImageOps.invert(rgb_image)
        r2, g2, b2 = inverted_image.split()
        final_transparent_image = Image.merge('RGBA', (r2, g2, b2, a))
        final_transparent_image.save(outpng)
    else:
        inverted_image = PIL.ImageOps.invert(image)
        inverted_image.save(outpng)
def draw_bbox(step, image, name='', image_height=1, image_width=1, bbox=None,
              label=None, gt_label=None, prob=None):
    # print(prob[:, label])
    source_img = Image.fromarray(image)
    b, g, r = source_img.split()
    source_img = Image.merge("RGB", (r, g, b))
    draw = ImageDraw.Draw(source_img)
    color = '#0000ff'
    if bbox is not None:
        for i, box in enumerate(bbox):
            if label is not None:
                if prob is not None:
                    if (prob[i, label[i]] > 0.5) and (label[i] > 0):
                        if gt_label is not None:
                            text = cat_id_to_cls_name(label[i]) + ' : ' + cat_id_to_cls_name(gt_label[i])
                            if label[i] != gt_label[i]:
                                color = '#ff0000'
                            else:
                                color = '#0000ff'
                        else:
                            text = cat_id_to_cls_name(label[i])
                        draw.text((2 + bbox[i, 0], 2 + bbox[i, 1]), text, fill=color)
                        if _DEBUG is True:
                            print("plot", label[i], prob[i, label[i]])
                        draw.rectangle(box, fill=None, outline=color)
                    else:
                        if _DEBUG is True:
                            print("skip", label[i], prob[i, label[i]])
                else:
                    text = cat_id_to_cls_name(label[i])
                    draw.text((2 + bbox[i, 0], 2 + bbox[i, 1]), text, fill=color)
                    draw.rectangle(box, fill=None, outline=color)
    return source_img.save(FLAGS.train_dir + '/est_imgs/test_' + name + '_' + str(step) + '.jpg', 'JPEG')
async def invert(self, ctx, target):
    """ Ever wanted to see the stuff of nightmares? """
    try:
        member = await commands.MemberConverter().convert(ctx, target)
        url = member.avatar_url
    except:
        url = target
    url = url.replace("gif", "png").strip("<>")
    m = await ctx.send("pls wait am generating")
    try:
        b = BytesIO()
        async with aiohttp.ClientSession() as session:
            async with session.get(url) as r:
                img = Image.open(BytesIO(await r.read()))
        bio = BytesIO()
        if img.mode == 'RGBA':
            r, g, b, a = img.split()
            rgb_image = Image.merge('RGB', (r, g, b))
            inverted = ImageOps.invert(rgb_image)
            r, g, b = inverted.split()
            img = Image.merge('RGBA', (r, g, b, a))
        else:
            img = ImageOps.invert(img)
        img.save(bio, "PNG")
        bio.seek(0)
        await ctx.send(file=discord.File(bio, filename="invert.png"))
        await m.delete()
    except Exception as e:
        print(e)
        await m.edit(content="Unable to generate image. Provide a mention or valid URL.")
def roll(image, delta):
    "Roll an image sideways"
    xsize, ysize = image.size
    delta = delta % xsize
    if delta == 0:
        return image
    part1 = image.crop((0, 0, delta, ysize))
    part2 = image.crop((delta, 0, xsize, ysize))
    # part1.show()  # strange bug?
    # part2.show()
    image.paste(part2, (0, 0, xsize - delta, ysize))
    image.paste(part1, (xsize - delta, 0, xsize, ysize))
    return image

# im = roll(im, 350)
# im.show()

# r, g, b = im.split()
# im = Image.merge("RGB", (b, g, r))
# im.show()

# out = im.resize((128, 128))
# out = im.rotate(45)
# out.show()

# multiply pixels by 1.2
# out = im.point(lambda i: i * 1.2)
# out.show()

# split the image into individual bands
def combine(y_hr, cb_lr, cr_lr):
    cb_hr = cb_lr.resize(y_hr.size, Image.BICUBIC)
    cr_hr = cr_lr.resize(y_hr.size, Image.BICUBIC)
    return Image.merge('YCbCr', [y_hr, cb_hr, cr_hr]).convert('RGB')
def _toqclass_helper(im):
    data = None
    colortable = None

    # handle filename, if given instead of image name
    if hasattr(im, "toUtf8"):
        # FIXME - is this really the best way to do this?
        if str is bytes:
            im = unicode(im.toUtf8(), "utf-8")
        else:
            im = str(im.toUtf8(), "utf-8")
    if isPath(im):
        im = Image.open(im)

    if im.mode == "1":
        format = QImage.Format_Mono
    elif im.mode == "L":
        format = QImage.Format_Indexed8
        colortable = []
        for i in range(256):
            colortable.append(rgb(i, i, i))
    elif im.mode == "P":
        format = QImage.Format_Indexed8
        colortable = []
        palette = im.getpalette()
        for i in range(0, len(palette), 3):
            colortable.append(rgb(*palette[i:i+3]))
    elif im.mode == "RGB":
        data = im.tobytes("raw", "BGRX")
        format = QImage.Format_RGB32
    elif im.mode == "RGBA":
        try:
            data = im.tobytes("raw", "BGRA")
        except SystemError:
            # workaround for earlier versions
            r, g, b, a = im.split()
            im = Image.merge("RGBA", (b, g, r, a))
        format = QImage.Format_ARGB32
    else:
        raise ValueError("unsupported image mode %r" % im.mode)

    # must keep a reference, or Qt will crash!
    __data = data or align8to32(im.tobytes(), im.size[0], im.mode)
    return {
        'data': __data, 'im': im, 'format': format, 'colortable': colortable
    }

##
# An PIL image wrapper for Qt.  This is a subclass of PyQt's QImage
# class.
#
# @param im A PIL Image object, or a file name (given either as Python
# string or a PyQt string object).
def render_results_as_image(raster_data_path, way_bitmap, test_images, band_list, tile_size,
                            predictions=None):
    """Save the source TIFF as a JPEG, with labels and data overlaid."""
    timestr = time.strftime("%Y%m%d-%H%M%S")
    outfile = os.path.splitext(raster_data_path)[0] + '-' + timestr + ".jpeg"
    # TIF to JPEG bit from:
    # http://stackoverflow.com/questions/28870504/converting-tiff-to-jpeg-in-python
    im = Image.open(raster_data_path)
    print("GENERATING JPEG for %s" % raster_data_path)
    rows = len(way_bitmap)
    cols = len(way_bitmap[0])
    t0 = time.time()
    r, g, b, ir = im.split()

    # visualize single band analysis tinted for R-G-B,
    # or grayscale for infrared band
    if sum(band_list) == 1:
        if band_list[3] == 1:
            # visualize IR as grayscale
            im = Image.merge("RGB", (ir, ir, ir))
        else:
            # visualize single-color band analysis as a scale of that color
            zeros_band = Image.new('RGB', r.size).split()[0]
            if band_list[0] == 1:
                im = Image.merge("RGB", (r, zeros_band, zeros_band))
            elif band_list[1] == 1:
                im = Image.merge("RGB", (zeros_band, g, zeros_band))
            elif band_list[2] == 1:
                im = Image.merge("RGB", (zeros_band, zeros_band, b))
    else:
        # visualize multi-band analysis as RGB
        im = Image.merge("RGB", (r, g, b))

    t1 = time.time()
    print("{0:.1f}s to FLATTEN the {1} analyzed bands of TIF to JPEG".format(t1 - t0, sum(band_list)))

    t0 = time.time()
    shade_labels(im, test_images, predictions, tile_size)
    t1 = time.time()
    print("{0:.1f}s to SHADE PREDICTIONS on JPEG".format(t1 - t0))

    t0 = time.time()
    # show raw data that spawned the labels
    for row in range(0, rows):
        for col in range(0, cols):
            if way_bitmap[row][col] != 0:
                im.putpixel((col, row), (255, 0, 0))
    t1 = time.time()
    print("{0:.1f}s to DRAW WAYS ON JPEG".format(t1 - t0))

    im.save(outfile, "JPEG")
async def invert(self, ctx, url: str):
    if not ctx.message.mentions:
        if not url:
            url = ctx.message.author.avatar_url
        else:
            pass
    else:
        url = ctx.message.mentions[0].avatar_url

    # Download the image
    img_name = "{}.png".format(url[url.rfind("/")+1:url.rfind(".")])
    try:
        await self.download_img(ctx, url, img_name)
    except asyncio.TimeoutError:
        await ctx.send("Image is too big.")
        os.remove(img_name)
        return
    except ValueError:
        await ctx.send("Invalid link.")
        return

    # Invert the image
    try:
        image = Image.open(img_name)
        width, height = image.size
        if (width * height) > 89478485:  # Checks if image is too big
            await ctx.send("Image is too big.")
            os.remove(img_name)
            return
        if image.mode == "RGBA":
            image.load()
            r, g, b, a = image.split()
            image = Image.merge("RGB", (r, g, b))
            image = ImageOps.invert(image)
            r, g, b = image.split()
            image = Image.merge("RGBA", (r, g, b, a))
        else:
            image = ImageOps.invert(image)
    except NotImplementedError:
        await ctx.send("Image format not supported.")
        os.remove(img_name)
        return
    except OSError:
        await ctx.send("Link not supported.")
        os.remove(img_name)
        return

    image.save(img_name)
    await ctx.channel.send(file=discord.File(img_name))
    os.remove(img_name)
def image_to_string(image, lang=None, boxes=False, config=None):
    '''
    Runs tesseract on the specified image. First, the image is written to disk,
    and then the tesseract command is run on the image. Tesseract's result is
    read, and the temporary files are erased.

    Also supports boxes and config:
    if boxes=True, "batch.nochop makebox" gets added to the tesseract call;
    if config is set, the config gets appended to the command, e.g. config="-psm 6"
    '''
    if len(image.split()) == 4:
        # In case we have 4 channels, lets discard the Alpha.
        # Kind of a hack, should fix in the future some time.
        r, g, b, a = image.split()
        image = Image.merge("RGB", (r, g, b))

    input_file_name = '%s.bmp' % tempnam()
    output_file_name_base = tempnam()
    if not boxes:
        output_file_name = '%s.txt' % output_file_name_base
    else:
        output_file_name = '%s.box' % output_file_name_base
    try:
        image.save(input_file_name)
        status, error_string = run_tesseract(input_file_name,
                                             output_file_name_base,
                                             lang=lang,
                                             boxes=boxes,
                                             config=config)
        if status:
            errors = get_errors(error_string)
            raise TesseractError(status, errors)
        f = open(output_file_name)
        try:
            return f.read().strip()
        finally:
            f.close()
    finally:
        cleanup(input_file_name)
        cleanup(output_file_name)
def main():
    if len(sys.argv) == 2:
        filename = sys.argv[1]
        try:
            image = Image.open(filename)
            if len(image.split()) == 4:
                # In case we have 4 channels, lets discard the Alpha.
                # Kind of a hack, should fix in the future some time.
                r, g, b, a = image.split()
                image = Image.merge("RGB", (r, g, b))
        except IOError:
            sys.stderr.write('ERROR: Could not open file "%s"\n' % filename)
            exit(1)
        print(image_to_string(image))
    elif len(sys.argv) == 4 and sys.argv[1] == '-l':
        lang = sys.argv[2]
        filename = sys.argv[3]
        try:
            image = Image.open(filename)
        except IOError:
            sys.stderr.write('ERROR: Could not open file "%s"\n' % filename)
            exit(1)
        print(image_to_string(image, lang=lang))
    else:
        sys.stderr.write('Usage: python tesseract.py [-l language] input_file\n')
        exit(2)
def _toqclass_helper(im):
    data = None
    colortable = None

    # handle filename, if given instead of image name
    if hasattr(im, "toUtf8"):
        # FIXME - is this really the best way to do this?
        if str is bytes:
            im = unicode(im.toUtf8(), "utf-8")
        else:
            im = str(im.toUtf8(), "utf-8")
    if isPath(im):
        im = Image.open(im)

    if im.mode == "1":
        format = QImage.Format_Mono
    elif im.mode == "L":
        format = QImage.Format_Indexed8
        colortable = []
        for i in range(256):
            colortable.append(rgb(i, i, i))
    elif im.mode == "P":
        format = QImage.Format_Indexed8
        colortable = []
        palette = im.getpalette()
        for i in range(0, len(palette), 3):
            colortable.append(rgb(*palette[i:i+3]))
    elif im.mode == "RGB":
        data = im.tobytes("raw", "BGRX")
        format = QImage.Format_RGB32
    elif im.mode == "RGBA":
        try:
            data = im.tobytes("raw", "BGRA")
        except SystemError:
            # workaround for earlier versions
            r, g, b, a = im.split()
            im = Image.merge("RGBA", (b, g, r, a))
        format = QImage.Format_ARGB32
    else:
        raise ValueError("unsupported image mode %r" % im.mode)

    __data = data or align8to32(im.tobytes(), im.size[0], im.mode)
    return {
        'data': __data, 'im': im, 'format': format, 'colortable': colortable
    }