我们从Python开源项目中,提取了以下50个代码示例,用于说明如何使用PIL.Image.frombuffer()。
def displayImage(image):
    """Show a palettised PIL image centred on the pygame screen, scaled to full height."""
    global grey_palette
    target_h = SCREEN_HEIGHT
    target_w = SCREEN_WIDTH
    # Wrap the PIL image's pixel bytes in a pygame surface and apply the grey ramp.
    surface = pygame.image.frombuffer(image.tobytes(), image.size, 'P')
    surface.set_palette(grey_palette)
    src_w = surface.get_width()
    src_h = surface.get_height()
    # Scale so the image fills the screen height while keeping aspect ratio.
    surface = pygame.transform.scale(
        surface, (int(src_w * (1.0 * target_h / src_h)), target_h))
    screen.fill(backgroundColor)
    # Blit centred on the display.
    screen.blit(surface, ((target_w - surface.get_width()) / 2,
                          (target_h - surface.get_height()) / 2))
    pygame.display.flip()
def _make_image_from_buffer(self, hCaptureBitmap):
    """Convert a Win32 HBITMAP capture into a PIL RGB image.

    Queries the bitmap's dimensions via GetDIBits, then copies the raw
    pixel bytes with GetBitmapBits and wraps them in a PIL image.
    """
    from PIL import Image
    bmp_info = BITMAPINFO()
    bmp_header = BITMAPFILEHEADER()
    hdc = user32.GetDC(None)
    bmp_info.bmiHeader.biSize = sizeof(BITMAPINFOHEADER)
    DIB_RGB_COLORS = 0
    # First GetDIBits call with a NULL data pointer only fills in the header
    # (width/height/bit depth) for the capture bitmap.
    gdi.GetDIBits(hdc, hCaptureBitmap, 0,0, None, byref(bmp_info), DIB_RGB_COLORS )
    # Bytes needed for the image: width * height * bytes-per-pixel
    # ((biBitCount+7)/8 rounds bits up to whole bytes).
    bmp_info.bmiHeader.biSizeImage = int(bmp_info.bmiHeader.biWidth *abs(bmp_info.bmiHeader.biHeight) * (bmp_info.bmiHeader.biBitCount+7)/8);
    size = (bmp_info.bmiHeader.biWidth, bmp_info.bmiHeader.biHeight )
    # print(size)
    pBuf = (c_char * bmp_info.bmiHeader.biSizeImage)()
    gdi.GetBitmapBits(hCaptureBitmap, bmp_info.bmiHeader.biSizeImage, pBuf)
    # 'BGRX' raw mode: Windows stores pixels as BGR with a padding byte.
    return Image.frombuffer('RGB', size, pBuf, 'raw', 'BGRX', 0, 1)
def writeMetaData(imgname, tag, replace):
    """Attempt to replace an EXIF tag in an image file (Python 2 code).

    NOTE(review): this function looks unfinished and cannot work as written:
      - the loop variable shadows the ``tag`` parameter,
      - the replacement itself is commented out,
      - ``Image.frombuffer`` is called with a length instead of a
        (width, height) size tuple, and pixel data cannot be concatenated
        with the EXIF dict,
      - ``outImg.save(img)`` references an undefined name (probably meant
        ``imgname``),
      - the bare ``except`` hides every failure behind "Failed".
    """
    try:
        metaData = {}
        imgFile = Image.open(imgname)
        imgData = imgFile.getdata()
        print "Getting meta data..."
        # _getexif() is a private Pillow API; returns None when no EXIF present.
        info = imgFile._getexif()
        if info:
            print "found meta data!"
            i = 0
            for (tag, value) in info.items():
                i = i + 1
                tagname = TAGS.get(tag, tag)
                #if tagname == tagToReplace:
                #info.items()[i][1] = replace
        print "Saving Change"
        outImg = Image.frombuffer("RGBX",len(imgData)+len(info),imgData+info)
        outImg.save(img)
    except:
        print "Failed"
def add_label_mask(self, label, mask):
    """Save the given label to file.

    Argument label can be a color or a string
    Argument boolean image, True where label will be set.
    """
    if isinstance(label, str):
        # Translate a label name into its RGBA colour key.
        label = next(c for c, nm in self.names.items() if nm == label)
    assert(isinstance(label, tuple) and len(label) == 4)  # Colors are tuples of 4 ints
    # BUG FIX: ``np.bool`` was deprecated in NumPy 1.20 and removed in 1.24;
    # ``np.bool_`` is the supported spelling and compares equal to a boolean dtype.
    assert(mask.dtype == np.bool_)
    img = Image.open(self.current_label)
    data = np.array(img, dtype=np.uint8)
    data[mask] = label
    # PIL sizes are (width, height) while numpy shapes are (rows, cols).
    size = tuple(reversed(data.shape[:2]))
    new_img = Image.frombuffer("RGBA", size, data, "raw", "RGBA", 0, 1)
    new_img.save(self.current_label)
def _fix_alpha_channel(self):
    # This is a fix for a bug where the Alpha channel was dropped.
    # Build a map from each 3-byte RGB prefix to its original alpha value.
    colors3to4 = [(c[:3], c[3]) for c in self.names.keys()]
    colors3to4 = dict(colors3to4)
    assert(len(colors3to4) == len(self.names))  # Dropped alpha channel causes colors to collide :(
    for lbl in self.labels:
        if lbl is None:
            continue  # No label file created yet.
        img = Image.open(lbl)
        size = img.size
        img = np.array(img)
        if img.shape[2] == 4:
            continue  # Image has alpha channel, good.
        elif img.shape[2] == 3:
            # Lookup each (partial) color and find what its alpha should be.
            alpha = np.apply_along_axis(lambda c: colors3to4[tuple(c)], 2, img)
            # Stack the recovered alpha plane back onto the RGB data.
            data = np.dstack([img, np.array(alpha, dtype=np.uint8)])
            new_img = Image.frombuffer("RGBA", size, data, "raw", "RGBA", 0, 1)
            new_img.save(lbl)
            print("FIXED", lbl)
def _execute_saving(self):
    """Read back the OpenGL framebuffer and save it to ``self.outfile``.

    Skips the first few calls (``self.flag`` counts up to 3) — presumably to
    let the scene finish rendering before the grab; TODO confirm.
    """
    if self.flag < 3:
        self.flag += 1
        return
    size_x, size_y = self._plot._window.get_size()
    # RGBA, one unsigned byte per channel.
    size = size_x*size_y*4*sizeof(c_ubyte)
    image = create_string_buffer(size)
    glReadPixels(0, 0, size_x, size_y, GL_RGBA, GL_UNSIGNED_BYTE, image)
    from PIL import Image
    im = Image.frombuffer('RGBA', (size_x, size_y), image.raw, 'raw', 'RGBA', 0, 1)
    # glReadPixels returns rows bottom-up; flip before saving.
    im.transpose(Image.FLIP_TOP_BOTTOM).save(self.outfile, self.format)
    self.flag = 0
    self.screenshot_requested = False
    if self.invisibleMode:
        self._plot._window.close()
def extract_plane_r36compat(frame: VideoFrame, planeno: int, *, compat: bool=False) -> Image.Image:
    """
    Extracts the plane using the old VapourSynth API for reading a frame.

    Since we are doing raw memory operations using ctypes, this function has
    proven to be prone to SIGSEGV while developing.

    This code will subseqently be dropped from this codebase when VapourSynth
    r36 is officially dropped with the official release of R39.

    :param frame:   The frame
    :param planeno: The plane number
    :param compat:  Are we dealing with a compat format.
    :return: The extracted image.
    """
    width, height = calculate_size(frame, planeno)
    stride = frame.get_stride(planeno)
    s_plane = height * stride
    # Raw view over the plane's memory; no copy, lifetime tied to the frame.
    buf = (ctypes.c_byte*s_plane).from_address(frame.get_read_ptr(planeno).value)
    # Negative orientation (-1): source rows are stored bottom-up for PIL's purposes.
    if not compat:
        return Image.frombuffer('L', (width, height), buf, "raw", "L", stride, -1)
    else:
        return Image.frombuffer('RGB', (width, height), buf, "raw", "BGRX", stride, -1)
def extract_plane_new(frame: VideoFrame, planeno: int, *, compat: bool=False) -> Image.Image:
    """
    Extracts the plane with the VapourSynth R37+ array-API.

    :param frame:   The frame
    :param planeno: The plane number
    :param compat:  Are we dealing with a compat format.
    :return: The extracted image.
    """
    plane = frame.get_read_array(planeno)
    height, width = plane.shape
    stride = width * frame.format.bytes_per_sample
    # Compat frames carry packed BGRX data; otherwise a single greyscale plane.
    mode, raw_mode = ('RGB', 'BGRX') if compat else ('L', 'L')
    return Image.frombuffer(mode, (width, height), bytes(plane), "raw", raw_mode, stride, -1)
def xy_image(self, z_index=0, t_index=0):
    """Render an image in the XY plane.

    Args:
        z_index: Optional Z index into the data matrix from which to render the image.
        t_index: Optional time sample index into the data matrix from which to render the image.

    Returns:
        Image
    """
    _, zdim, ydim, xdim = self.data.shape
    # One 32-bit RGBA pixel per voxel; allocated as uint64 for ndlib's benefit.
    imagemap = np.zeros([ydim, xdim], dtype=np.uint64)
    # false color redrawing of the region
    ndlib.recolor_ctype(self.data[t_index, z_index, :, :].reshape((ydim, xdim)), imagemap)
    return Image.frombuffer('RGBA', (xdim, ydim), imagemap.astype(dtype=np.uint32), 'raw', 'RGBA', 0, 1)
def xz_image(self, z_scale=1, y_index=0, t_index=0):
    """Render an image in the xz plane.

    Args:
        z_scale: Scaling factor for the z-dimension. Useful for rendering non-isotropic data
        y_index: Optional Y index into the data matrix from which to render the image.
        t_index: Optional time sample index into the data matrix from which to render the image.

    Returns:
        Image
    """
    _, zdim, ydim, xdim = self.data.shape
    # One 32-bit RGBA pixel per voxel; allocated as uint64 for ndlib's benefit.
    imagemap = np.zeros([zdim, xdim], dtype=np.uint64)
    # false color redrawing of the region
    ndlib.recolor_ctype(self.data[t_index, :, y_index, :].reshape((zdim, xdim)), imagemap)
    outimage = Image.frombuffer('RGBA', (xdim, zdim), imagemap.astype(dtype=np.uint32), 'raw', 'RGBA', 0, 1)
    # Stretch the z axis for non-isotropic voxels.
    return outimage.resize([xdim, int(zdim * z_scale)])
def yz_image(self, z_scale=1, x_index=0, t_index=0):
    """Render an image in the yz plane.

    Args:
        z_scale: Scaling factor for the z-dimension. Useful for rendering non-isotropic data
        x_index: Optional X index into the data matrix from which to render the image.
        t_index: Optional time sample index into the data matrix from which to render the image.

    Returns:
        Image
    """
    _, zdim, ydim, xdim = self.data.shape
    # One 32-bit RGBA pixel per voxel; allocated as uint64 for ndlib's benefit.
    imagemap = np.zeros([zdim, ydim], dtype=np.uint64)
    # false color redrawing of the region
    ndlib.recolor_ctype(self.data[t_index, :, :, x_index].reshape((zdim, ydim)), imagemap)
    outimage = Image.frombuffer('RGBA', (ydim, zdim), imagemap.astype(dtype=np.uint32), 'raw', 'RGBA', 0, 1)
    # Stretch the z axis for non-isotropic voxels.
    return outimage.resize([ydim, int(zdim * z_scale)])
    # TODO: Implement zoom in/zoom out once propagation is implemented
def xz_image(self, z_scale=1, y_index=0, t_index=0):
    """Render an image in the xz plane.

    Args:
        z_scale: Scaling factor for the z-dimension. Useful for rendering non-isotropic data
        y_index: Optional Y index into the data matrix from which to render the image.
        t_index: Optional time sample index into the data matrix from which to render the image.

    Returns:
        Image
    """
    time, z_dim, y_dim, x_dim = self.data.shape
    xz_slice = self.data[t_index, :, y_index, :]
    out_image = Image.frombuffer('L', (x_dim, z_dim), xz_slice.flatten(), 'raw', 'L', 0, 1)
    # NOTE: resizing to zero pixels (tiny z_dim * z_scale) does not work.
    return out_image.resize([x_dim, int(z_dim * z_scale)])
def xy_image(self, z_index=0, t_index=0):
    """Render an image in the XY plane.

    Args:
        z_index: Optional Z index into the data matrix from which to render the image.
        t_index: Optional time sample index into the data matrix from which to render the image.

    Returns:
        Image
    """
    # This works for 16-> conversions
    _, z_dim, y_dim, x_dim = self.data.shape
    xy_slice = self.data[t_index, z_index, :, :].flatten()
    # If data type is uint8 you got windowed data FROM the API layer. Otherwise limit output range.
    if self.data.dtype == np.uint8:
        return Image.frombuffer('L', (x_dim, y_dim), xy_slice, 'raw', 'L', 0, 1)
    img16 = Image.frombuffer('I;16', (x_dim, y_dim), xy_slice, 'raw', 'I;16', 0, 1)
    # Window the 16-bit range down to 8 bits for display.
    return img16.point(lambda i: i * (1. / 256)).convert('L')
def xz_image(self, z_scale=1, y_index=0, t_index=0):
    """Render an image in the xz plane.

    Args:
        z_scale: Scaling factor for the z-dimension. Useful for rendering non-isotropic data
        y_index: Optional Y index into the data matrix from which to render the image.
        t_index: Optional time sample index into the data matrix from which to render the image.

    Returns:
        Image
    """
    _, z_dim, y_dim, x_dim = self.data.shape
    xz_slice = self.data[t_index, :, y_index, :].flatten()
    # If data type is uint8 you got windowed data FROM the API layer. Otherwise limit output range.
    if self.data.dtype == np.uint8:
        out_image = Image.frombuffer('L', (x_dim, z_dim), xz_slice, 'raw', 'L', 0, 1)
    else:
        out_image = Image.frombuffer('I;16', (x_dim, z_dim), xz_slice, 'raw', 'I;16', 0, 1)
        # Window the 16-bit range down to 8 bits for display.
        out_image = out_image.point(lambda i: i * (1. / 256)).convert('L')
    # Stretch the z axis for non-isotropic voxels.
    return out_image.resize([x_dim, int(z_dim * z_scale)])
def yz_image(self, z_scale=1, x_index=0, t_index=0):
    """Render an image in the yz plane.

    Args:
        z_scale: Scaling factor for the z-dimension. Useful for rendering non-isotropic data
        x_index: Optional X index into the data matrix from which to render the image.
        t_index: Optional time sample index into the data matrix from which to render the image.

    Returns:
        Image
    """
    _, z_dim, y_dim, x_dim = self.data.shape
    yz_slice = self.data[t_index, :, :, x_index].flatten()
    # If data type is uint8 you got windowed data FROM the API layer. Otherwise limit output range.
    if self.data.dtype == np.uint8:
        out_image = Image.frombuffer('L', (y_dim, z_dim), yz_slice, 'raw', 'L', 0, 1)
    else:
        out_image = Image.frombuffer('I;16', (y_dim, z_dim), yz_slice, 'raw', 'I;16', 0, 1)
        # Window the 16-bit range down to 8 bits for display.
        out_image = out_image.point(lambda i: i * (1. / 256)).convert('L')
    # Stretch the z axis for non-isotropic voxels.
    return out_image.resize([y_dim, int(z_dim * z_scale)])
def xz_image(self, z_scale=1, y_index=0):
    """Render an image in the xz plane.

    Must be overridden in a child class to deal with data types and shape.

    Example for a uint8-based cube::

        zdim, ydim, xdim = self.data.shape
        out_image = Image.frombuffer('L', (xdim, zdim),
                                     self.data[:, y_index, :].flatten(),
                                     'raw', 'L', 0, 1)
        return out_image.resize([xdim, int(zdim*z_scale)])

    Args:
        z_scale: Scaling factor for the z-dimension. Useful for rendering non-isotropic data
        y_index: Optional Y index into the data matrix from which to render the image.

    Returns:
        Image
    """
    return NotImplemented
def to_image(self):
    """Convert the Frame to a PIL :py:class:`Image` instance."""
    # Map each frame format to a (PIL mode, raw decoder mode) pair.
    conversions = {
        FrameFormat.BGRX: ('RGB', 'BGRX'),
        FrameFormat.RGBX: ('RGB', 'RGBX'),
        FrameFormat.Gray: ('L', 'L'),
        FrameFormat.Float: ('F', 'F'),
    }
    try:
        mode, raw_mode = conversions[self.format]
    except KeyError:
        raise NotImplementedError()
    return Image.frombuffer(mode, (self.width, self.height), self.data, 'raw', raw_mode)
def to_array(self):
    """Convert the image to a numpy :py:class:`array` instance.

    The memory is not copied so be careful performing any operations
    which modify the contents of the frame.
    """
    fmt = self.format
    # Pick element type and shape for the zero-copy view over self.data.
    if fmt is FrameFormat.BGRX or fmt is FrameFormat.RGBX:
        dtype, shape = 'uint8', (self.height, self.width, 4)
    elif fmt is FrameFormat.Gray:
        dtype, shape = 'uint8', (self.height, self.width)
    elif fmt is FrameFormat.Float:
        dtype, shape = 'float32', (self.height, self.width)
    else:
        raise NotImplementedError()
    return np.frombuffer(self.data, dtype=dtype).reshape(shape, order='C')
def _get_image_from_frame_data(self, frame_data):
    """Convert an image from data to a usable format.

    Args:
        frame_data (``imageio.core.util.Image``): an imageio image

    Returns:
        ``PIL.Image.Image``: and image object
    """
    # The video metadata carries the frame dimensions as (width, height).
    frame_size = self.video.get_meta_data()['source_size']
    return Image.frombuffer('RGB', frame_size, frame_data, 'raw', 'RGB', 0, 1)
def read_mk(fobj, start_length, size):
    """Read an alpha ("mask") band from *fobj*.

    Alpha masks seem to be stored uncompressed, one byte per pixel.
    """
    offset = start_length[0]
    fobj.seek(offset)
    # Pixel dimensions are the nominal size scaled by the third component.
    mask_size = (size[0] * size[2], size[1] * size[2])
    n_pixels = mask_size[0] * mask_size[1]
    band = Image.frombuffer("L", mask_size, fobj.read(n_pixels), "raw", "L", 0, 1)
    return {"A": band}
def write(self, s):
    """Contrast-stretch one raw greyscale frame and display it on the LCD.

    BUG FIX: the stretch used to be ``pix * (255/range) - min`` which
    (a) subtracts the minimum *after* scaling so values overflow 0..255,
    (b) appends floats to a bytearray (TypeError on Python 3), and
    (c) divides by zero on a flat frame. Proper min-max normalisation
    subtracts the minimum first: ``(pix - min) * 255 // range``.
    """
    global lcd
    raw = bytearray()
    raw.extend(s[:(P_WIDTH*P_HEIGHT)])
    lo = min(raw)
    hi = max(raw)
    # Guard against a flat frame (all pixels equal) to avoid ZeroDivisionError.
    span = (hi - lo) or 1
    stretched = bytearray((pix - lo) * 255 // span for pix in raw)
    print(max(raw), min(raw), max(stretched), min(stretched))
    image = Image.frombuffer('L', P_SIZE, stretched, "raw", 'L', 0, 1)
    image.thumbnail(S_SIZE, Image.NEAREST)
    image = ImageOps.invert(image)
    image = image.convert('1')
    lcd.write(image.tobytes())
def write(self, s):
    """Advance a one-pixel-wide vertical scan across incoming frames and show it."""
    global lcd
    frame = Image.frombuffer('L', P_SIZE, s, "raw", 'L', 0, 1)
    # Copy the current scan column from the new frame into the persistent image.
    column = frame.crop((self.x, 0, self.x + 1, P_HEIGHT))
    self.image_scan.paste(column, (self.x, 0))
    if self.x < P_WIDTH - 1:
        self.x += 1
    output = ImageOps.invert(self.image_scan)
    output.thumbnail(S_SIZE, Image.NEAREST)
    output = output.convert('1')
    lcd.write(output.tobytes())
def write(self, s):
    """Display one raw 416x240 greyscale frame on the LCD, tracking total bytes."""
    global lcd
    self.size += len(s)
    frame = Image.frombuffer('L', (416, 240), s, "raw", 'L', 0, 1)
    # Trim the buffer down to the physical screen area.
    frame = frame.crop((0, 0, S_WIDTH, S_HEIGHT))
    frame = ImageOps.invert(frame)
    frame = frame.convert('1')
    lcd.write(frame.tobytes())
def write(self, s):
    """Display one raw 416x240 greyscale frame, skipping an 8-pixel left margin."""
    global lcd
    frame = Image.frombuffer('L', (416, 240), s, "raw", 'L', 0, 1)
    # Crop starting 8 pixels in from the left edge.
    frame = frame.crop((8, 0, S_WIDTH + 8, S_HEIGHT))
    frame = ImageOps.invert(frame)
    frame = frame.convert('1')
    lcd.write(frame.tobytes())
def arr2img(arr):
    # arr is expected to be a 2d array of floats in [0,1]
    """Convert a 2-D float array with values in [0, 1] to an 8-bit PIL image.

    BUG FIX: PIL sizes are (width, height) but ``arr.shape`` is
    (rows, cols) == (height, width); passing the shape unreversed renders any
    non-square input with the wrong geometry. Also uses ``tobytes()``:
    ``ndarray.tostring`` was deprecated and removed in NumPy 2.0.
    """
    height, width = arr.shape
    data = (arr * 255).astype(np.uint8).tobytes()
    return Image.frombuffer('L', (width, height), data, 'raw', 'L', 0, 1)
def pil_save(filename, pixels, width, height):
    """Save a bottom-up Windows BGR pixel buffer to *filename* as a screenshot."""
    from PIL import Image, ImageFile
    # Windows DIB rows are padded to a 4-byte boundary.
    stride = (width * 3 + 3) & -4
    img = Image.frombuffer('RGB', (width, height), pixels, 'raw', 'BGR', stride, 1)
    ImageFile.MAXBLOCK = width * height
    # DIB pixel data is stored bottom-up; flip to top-down.
    img = img.transpose(Image.FLIP_TOP_BOTTOM)
    img.save(filename, quality=95, optimize=True, progressive=True)
    logging.info('Screenshot saved to %s'%filename)
def pil_save(filename, pixels, width, height):
    """Save a bottom-up Windows BGR pixel buffer to *filename* as a webcam snap."""
    from PIL import Image, ImageFile
    # Windows DIB rows are padded to a 4-byte boundary.
    stride = (width * 3 + 3) & -4
    img = Image.frombuffer('RGB', (width, height), pixels, 'raw', 'BGR', stride, 1)
    ImageFile.MAXBLOCK = width * height
    # DIB pixel data is stored bottom-up; flip to top-down.
    img = img.transpose(Image.FLIP_TOP_BOTTOM)
    img.save(filename, quality=95, optimize=True, progressive=True)
    logging.info('webcam snap saved to %s'%filename)
def pil_save(filename, pixels, width, height):
    """Save a bottom-up Windows BGR pixel buffer to *filename* as a JPEG."""
    from PIL import Image, ImageFile
    # Windows DIB rows are padded to a 4-byte boundary.
    stride = (width * 3 + 3) & -4
    img = Image.frombuffer('RGB', (width, height), pixels, 'raw', 'BGR', stride, 1)
    ImageFile.MAXBLOCK = width * height
    # DIB pixel data is stored bottom-up; flip to top-down.
    img = img.transpose(Image.FLIP_TOP_BOTTOM)
    img.save(filename, quality=95, optimize=True, progressive=True)
def screen(self):
    """PIL Image of current window screen.
    reference: https://msdn.microsoft.com/en-us/library/dd183402(v=vs.85).aspx"""
    hwnd = win32gui.GetDesktopWindow()
    left, top, right, bottom = self.rect
    width, height = right-left, bottom-top
    # copy bits to temporary dc
    win32gui.BitBlt(self._hdcmem, 0, 0, width, height, self._hdcwin, left, top, win32con.SRCCOPY)
    # read bits into buffer
    windll.gdi32.GetDIBits(self._hdcmem, self._hbmp.handle, 0, height, self._buf, ctypes.byref(self._bi), win32con.DIB_RGB_COLORS)
    # make a PIL Image ('BGRX': Windows stores pixels as BGR plus a padding byte)
    img = Image.frombuffer('RGB', (width, height), self._buf, 'raw', 'BGRX', 0, 1)
    # DIB data is bottom-up; flip to top-down.
    img = img.transpose(Image.FLIP_TOP_BOTTOM)
    return img
def on_screenshot_received(self, data):
    """Decode a screenshot payload and display it in the window.

    The payload is the repr of a dict carrying width/height and
    zlib-compressed BGRX pixel data ('screenshotbits').

    NOTE(review): only ``SyntaxError`` is caught, but ``ast.literal_eval``
    can also raise ``ValueError`` on malformed literals; consider widening
    the except clause. The silent ``pass`` also hides decode failures.
    """
    screen_dict = data['payload']
    try:
        # literal_eval safely parses the dict repr without executing code.
        screen_info = ast.literal_eval(screen_dict)
        im = Image.frombuffer('RGB', (int(screen_info['width']), int(screen_info['height'])), zlib.decompress(screen_info['screenshotbits']), 'raw', 'BGRX', 0, 1)
        screen_bits = im.convert('RGBA')
        # Scale to the window while preserving aspect ratio, then display.
        self.screenshot = Screenshot(QPixmap.fromImage(ImageQt.ImageQt(screen_bits)).scaled( self.size(), Qt.KeepAspectRatio, Qt.SmoothTransformation))
        self.setCentralWidget(self.screenshot)
        self.current_bits = screen_bits
        self.loading.hide()
    except SyntaxError:
        pass
def save_image(screen_info, client_id, storage):
    """Decode a compressed BGRX screenshot and persist it as a PNG.

    Returns (path, date-string, window title, date) for the saved file.
    """
    date = screen_info['date'].split('_')[0]
    out_dir = check_client_storage(storage, client_id, date)
    raw = zlib.decompress(screen_info['screen_bits'])
    dims = (screen_info['width'], screen_info['height'])
    decoded = Image.frombuffer('RGB', dims, raw, 'raw', 'BGRX', 0, 1)
    screen_bits = decoded.convert('RGBA')
    screen_path = os.path.join(out_dir, '%s.png' % screen_info['date'])
    screen_bits.save(screen_path, 'png')
    return screen_path, screen_info['date'], screen_info['title_name'], date
def make_screenshot(x1, y1, x2, y2):
    """Grab the screen region (x1, y1)-(x2, y2) as an (h, w, 3) uint8 array."""
    width = x2 - x1
    height = y2 - y1
    # Three bytes (RGB) per pixel.
    n_bytes = width * height * 3
    SCREENSHOT_LIB.getScreen.argtypes = []
    raw = (ctypes.c_ubyte * n_bytes)()
    SCREENSHOT_LIB.getScreen(x1, y1, width, height, raw)
    flat = np.frombuffer(raw, dtype=np.uint8)
    return flat.reshape((height, width, 3))
def _saveImage(img, imgsize, destFile):
    """Save an (H, W, 3|4) uint8 image array to *destFile* as RGBA.

    imgsize is the PIL (width, height) tuple for the output image.

    BUG FIX: ``ndarray.tostring`` was deprecated and removed in NumPy 2.0;
    ``tobytes()`` is the byte-identical replacement.
    """
    mode = 'RGBA'
    # Flatten to one row per pixel so an alpha column can be appended.
    arr = img.reshape(img.shape[0]*img.shape[1], img.shape[2])
    if len(arr[0]) == 3:
        # RGB input: append a fully-opaque alpha channel.
        arr = np.c_[arr, 255*np.ones((len(arr),1), np.uint8)]
    img = Image.frombuffer(mode, imgsize, arr.tobytes(), 'raw', mode, 0, 1)
    img.save(destFile)
def _handle_frame(self, port, buf):
    """Copy an incoming camera frame to the output port, overlaying a clock.

    Returns True to indicate the input buffer was consumed without
    forwarding (port disabled), False on normal processing.
    """
    try:
        out1 = self.outputs[0].get_buffer(False)
        #out2 = self.outputs[1].get_buffer(False)
    except PiCameraPortDisabled:
        return True
    if out1:
        # copy the input frame to the first output buffer
        out1.copy_from(buf)
        with out1 as data:
            # construct an Image using the Y plane of the output
            # buffer's data and tell PIL we can write to the buffer
            img = Image.frombuffer('L', port.framesize, data, 'raw', 'L', 0, 1)
            img.readonly = False
            # Lock guards _clock_image, which is updated from another thread
            # (presumably — TODO confirm against the rest of the class).
            with self._lock:
                if self._clock_image:
                    img.paste(self._clock_image, (10, 10), self._clock_image)
        # if we've got a second output buffer replicate the first
        # buffer into it (note the difference between replicate and
        # copy_from)
        # if out2:
        #     out2.replicate(out1)
        try:
            self.outputs[0].send_buffer(out1)
        except PiCameraPortDisabled:
            return True
        # if out2:
        #     try:
        #         self.outputs[1].send_buffer(out2)
        #     except PiCameraPortDisabled:
        #         return True
    return False
def xy_image(self, z_index=0, t_index=0):
    """Render an image in the XY plane.

    Args:
        z_index: Optional Z index into the data matrix from which to render the image.
        t_index: Optional time sample index into the data matrix from which to render the image.

    Returns:
        Image
    """
    time, z_dim, y_dim, x_dim = self.data.shape
    xy_slice = self.data[t_index, z_index, :, :].flatten()
    return Image.frombuffer('L', (x_dim, y_dim), xy_slice, 'raw', 'L', 0, 1)