We extracted the following 50 code examples from Python open source projects to illustrate how to use PIL.Image.open().
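Before the project-specific examples, here is a minimal sketch of the basic Image.open() workflow for orientation; the file names used here are illustrative and not taken from any of the projects below.

from PIL import Image

# Image.open() is lazy: it reads the header but defers loading pixel data.
img = Image.open("example.jpg")  # illustrative path
print(img.format, img.size, img.mode)

# Force the pixel data to load, then work with the image as usual.
img.load()
gray = img.convert("L")
gray.save("example_gray.jpg")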
def generate_img(output="", theme={}, text="", resolution=(1920, 1080)):
    # img = Image.open(backdrop)
    img = Image.new("RGB", resolution, theme["background"])
    W, H = img.size

    logo = Image.open(DEFAULT_DIR + "/assets/logo.png")
    colorized_img = ImageOps.colorize(logo.convert("L"), theme["text"], theme["background"])
    size = int((W / 100) * 17)
    logo_newsize = colorized_img.resize((size, size), Image.ANTIALIAS)
    img.paste(logo_newsize, (int((W - size) / 2), int((H - size) / 2)))

    draw = ImageDraw.Draw(img)
    base_font_pixle = int((56 / 1920) * resolution[0])
    font = ImageFont.truetype("DejaVuSansMono.ttf", base_font_pixle)
    w, h = font.getsize(text)

    text_draw(draw, text, base_font_pixle, img.size, theme)
    img.save(output, quality=100)
def fromqimage(im):
    buffer = QBuffer()
    buffer.open(QIODevice.ReadWrite)
    # preserve alpha channel with png
    # otherwise ppm is more friendly with Image.open
    if im.hasAlphaChannel():
        im.save(buffer, 'png')
    else:
        im.save(buffer, 'ppm')

    b = BytesIO()
    try:
        b.write(buffer.data())
    except TypeError:
        # workaround for Python 2
        b.write(str(buffer.data()))
    buffer.close()
    b.seek(0)

    return Image.open(b)
def login_after_captcha(self, response):
    with open("captcha.jpg", "wb") as f:
        f.write(response.body)
        f.close()

    from PIL import Image
    try:
        im = Image.open('captcha.jpg')
        im.show()
        im.close()
    except:
        pass

    captcha = input("please input the captcha\n>")

    post_data = response.meta.get("post_data", {})
    post_url = "https://www.zhihu.com/login/phone_num"
    post_data["captcha"] = captcha
    return [scrapy.FormRequest(
        url=post_url,
        formdata=post_data,
        headers=self.headers,
        callback=self.check_login
    )]
def get_captcha():
    import time
    t = str(int(time.time() * 1000))
    captcha_url = "https://www.zhihu.com/captcha.gif?r={0}&type=login".format(t)
    t = session.get(captcha_url, headers=header)
    with open("captcha.jpg", "wb") as f:
        f.write(t.content)
        f.close()

    from PIL import Image
    try:
        im = Image.open('captcha.jpg')
        im.show()
        im.close()
    except:
        pass

    captcha = input("please input the captcha\n>")
    return captcha
def make_end_page(final_results, robot_name_to_img):
    with open('common/replay/{}/final.html'.format(replay_name), 'w') as file:
        print('<html><body><div style="text-align:center">', end='', file=file)
        print('<h1>Final Results</h1><br/>', file=file)
        print('<table style="margin:0 auto;border: 1px solid black">', file=file)
        print('<caption>Rankings</caption><tr>', file=file)
        for title in ['Rank', 'Robot', 'Name', 'Max Flag Scored', 'Total Flags Scored',
                      'Iterations Survived', 'Robots Alive']:
            print('<th style="border: 1px solid black">{}</th>'.format(title), end='', file=file)
        print('</tr>', file=file)
        for brain in sorted(final_results.brains, key=lambda x: x.placement):
            image = image_dir + robot_name_to_img[brain.name]
            print('<tr>', end='', file=file)
            for item in [brain.placement, '<image src="{}"/>'.format(image), brain.name,
                         brain.max_flag, brain.total_flags, brain.iterations_survived,
                         brain.robots_alive]:
                print('<td style="border: 1px solid black">{}</td>'.format(item), end='', file=file)
            print('</tr>', file=file)
        print('</table>', file=file)
        print('</div></body></html>', file=file)
def cache_images(brains):
    image_dir = 'common/replay/images/'
    cached_images['pit'] = Image.open(image_dir + 'pit.gif')
    cached_images[api.WALL] = Image.open(image_dir + 'wall.gif')
    cached_images[api.CORPSE] = Image.open(image_dir + 'corpse.gif')
    cached_images[api.MOUNTED_LASER] = Image.open(image_dir + 'mounted.gif')
    cached_images[api.LEFT_SPINNER] = Image.open(image_dir + 'spinner.gif')
    cached_images[api.RIGHT_SPINNER] = cached_images[api.LEFT_SPINNER].rotate(180)
    cached_images[api.EMPTY] = Image.open(image_dir + 'floor.gif')
    for i in range(1, 9):
        cached_images[api.FLAG + str(i)] = Image.open(image_dir + api.FLAG + str(i) + '.gif')
    for i in range(len(brains)):
        cached_images[brains[i].name] = Image.open(image_dir + 'robot_{}.gif'.format(i + 1))

    image_rotation[api.AHEAD] = 0
    image_rotation[api.BEHIND] = 180
    image_rotation[api.LEFT] = 90
    image_rotation[api.RIGHT] = 270
def __init__(self, root=None, transform=None, target_transform=None):
    self.env = lmdb.open(
        root,
        max_readers=1,
        readonly=True,
        lock=False,
        readahead=False,
        meminit=False)

    if not self.env:
        print('cannot create lmdb from %s' % (root))
        sys.exit(0)

    with self.env.begin(write=False) as txn:
        nSamples = int(txn.get('num-samples'))
        self.nSamples = nSamples

    self.transform = transform
    self.target_transform = target_transform
def pil_image_to_pygame_surface(pil_image):
    """Convert PIL Image() to RGBA pygame Surface.

    Args:
        pil_image (Image): image to convert to pygame.Surface().

    Returns:
        pygame.Surface: the converted image

    Example:
        >>> from PIL import Image
        >>> gif = Image.open('demo/test_scene/test.gif')
        >>> AnimatedSprite.pil_image_to_pygame_surface(gif)
        <Surface(10x10x32 SW)>

    """
    image_as_string = pil_image.convert('RGBA').tobytes()
    return pygame.image.fromstring(image_as_string, pil_image.size, 'RGBA')
def has_tomatoes(self, im_path):
    # load the image
    im = Image.open(im_path)
    im = np.asarray(im, dtype=np.float32)
    im = self.prepare_image(im)

    # launch an inference with the image
    pred = self.sess.run(
        self.output_logits,
        feed_dict={self.img_feed: im.eval(session=self.sess)})

    if np.argmax(pred) == 0:
        print("NOT a tomato ! (confidence : ", pred[0, 0], "%)")
    else:
        print("We have a tomato ! (confidence : ", pred[0, 1], "%)")
def print_chapters(self, show=False):
    '''Display info of chapters.'''
    headers = {
        'use-agent': "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36",
        'Referer': 'http://manhua.dmzj.com/tags/s.shtml'
    }
    text = 'There are {n} chapters in comic {c}:\n{chs}'.format(
        n=self.chapter_num,
        c=self.comic_title,
        chs='\n'.join([info[0] for info in self.chapter_urls]))
    print(text)
    if show:
        try:
            res = requests.get(self.cover, headers=headers)
            if b'403' in res.content:
                raise ValueError('Got cover img failed')
            out = BytesIO(res.content)
            out.seek(0)
            Image.open(out).show()
        except (ConnectionError, ValueError):
            traceback.print_exc()
    return text
def writeBinaray(outputFile, imagePath, label):
    img = Image.open(imagePath)
    img = img.resize((imageSize, imageSize), PIL.Image.ANTIALIAS)
    img = np.array(img)

    r = img[:, :, 0].flatten()
    g = img[:, :, 1].flatten()
    b = img[:, :, 2].flatten()
    label = [label]

    out = np.array(list(label) + list(r) + list(g) + list(b), np.uint8)
    outputFile.write(out.tobytes())

    # if you want to show the encoded image, set the 'debugEncodedImage' flag
    if debugEncodedImage:
        showImage(r, g, b)
def resize_image(path, width):
    """
    Resizes the image.

    :param str path: The path to the image file.
    :param int width: The width of the output image.
    """
    try:
        img = Image.open(path)
        ratio = width / float(img.width)
        height = int(float(img.height) * float(ratio))
        new_img = img.resize((width, height), Image.ANTIALIAS)
        new_img.save(path)
    except IOError:
        pass

# ----------------------------------------------------------------------------------------------------------------------
def getverify(name):
    rep = {'O': '0', 'A': '8', 'I': '1', 'L': '1', 'Z': '2',
           'S': '8', 'E': '6', 'G': '9', 'B': '6', ' ': ''}
    im = Image.open(name)                       # open the captcha image
    im = im.convert('L')                        # convert to grayscale
    binary_image = im.point(initTable(), '1')   # binarize
    text = image_to_string(binary_image, config='-psm 7').upper()
    for r in rep:
        text = text.replace(r, rep[r])
    return text
def _load_pilfont(self, filename):
    fp = open(filename, "rb")

    for ext in (".png", ".gif", ".pbm"):
        try:
            fullname = os.path.splitext(filename)[0] + ext
            image = Image.open(fullname)
        except:
            pass
        else:
            if image and image.mode in ("1", "L"):
                break
    else:
        raise IOError("cannot find glyph data file")

    self.file = fullname

    return self._load_pilfont_data(fp, image)
def _save(im, fp, filename):
    if im.mode[0] != "F":
        im = im.convert('F')

    hdr = makeSpiderHeader(im)
    if len(hdr) < 256:
        raise IOError("Error creating Spider header")

    # write the SPIDER header
    try:
        fp = open(filename, 'wb')
    except:
        raise IOError("Unable to open %s for writing" % filename)
    fp.writelines(hdr)

    rawmode = "F;32NF"  # 32-bit native floating point
    ImageFile._save(im, fp, [("raw", (0, 0) + im.size, 0, (rawmode, 0, 1))])

    fp.close()
def open(filename):
    # FIXME: modify to return a WalImageFile instance instead of
    # plain Image object ?

    if hasattr(filename, "read"):
        fp = filename
    else:
        fp = builtins.open(filename, "rb")

    # read header fields
    header = fp.read(32 + 24 + 32 + 12)
    size = i32(header, 32), i32(header, 36)
    offset = i32(header, 40)

    # load pixel data
    fp.seek(offset)

    im = Image.frombytes("P", size, fp.read(size[0] * size[1]))
    im.putpalette(quake2palette)

    im.format = "WAL"
    im.format_description = "Quake2 Texture"

    # strings are null-terminated
    im.info["name"] = header[:32].split(b"\0", 1)[0]
    next_name = header[56:56 + 32].split(b"\0", 1)[0]
    if next_name:
        im.info["next_name"] = next_name

    return im
def getOpenProfile(profileFilename):
    """
    (pyCMS) Opens an ICC profile file.

    The PyCMSProfile object can be passed back into pyCMS for use in creating
    transforms and such (as in ImageCms.buildTransformFromOpenProfiles()).

    If profileFilename is not a valid filename for an ICC profile, a PyCMSError
    will be raised.

    :param profileFilename: String, as a valid filename path to the ICC profile
        you wish to open, or a file-like object.
    :returns: A CmsProfile class object.
    :exception PyCMSError:
    """

    try:
        return ImageCmsProfile(profileFilename)
    except (IOError, TypeError, ValueError) as v:
        raise PyCMSError(v)
def grab(bbox=None):
    if sys.platform == "darwin":
        f, file = tempfile.mkstemp('.png')
        os.close(f)
        subprocess.call(['screencapture', '-x', file])
        im = Image.open(file)
        im.load()
        os.unlink(file)
    else:
        size, data = grabber()
        im = Image.frombytes(
            "RGB", size, data,
            # RGB, 32-bit line padding, origo in lower left corner
            "raw", "BGR", (size[0] * 3 + 3) & -4, -1
        )
    if bbox:
        im = im.crop(bbox)
    return im
def __init__(self, image=None, **kw):
    # Tk compatibility: file or data
    if image is None:
        if "file" in kw:
            image = Image.open(kw["file"])
            del kw["file"]
        elif "data" in kw:
            from io import BytesIO
            image = Image.open(BytesIO(kw["data"]))
            del kw["data"]

    self.__mode = image.mode
    self.__size = image.size

    if _pilbitmap_check():
        # fast way (requires the pilbitmap booster patch)
        image.load()
        kw["data"] = "PIL:%d" % image.im.id
        self.__im = image  # must keep a reference
    else:
        # slow but safe way
        kw["data"] = image.tobitmap()
    self.__photo = tkinter.BitmapImage(**kw)
def get_header_keyword(fits_file, keyword, ext=0):
    '''
    Get the value of a header keyword in a fits file,
    optionally using an extension.
    '''
    hdulist = pyfits.open(fits_file)
    if keyword in hdulist[ext].header:
        val = hdulist[ext].header[keyword]
    else:
        val = None

    hdulist.close()
    return val
def get_header_keyword_list(fits_file, keyword_list, ext=0):
    hdulist = pyfits.open(fits_file)
    out_dict = {}
    for keyword in keyword_list:
        if keyword in hdulist[ext].header:
            out_dict[keyword] = hdulist[ext].header[keyword]
        else:
            out_dict[keyword] = None

    hdulist.close()
    return out_dict

## IMAGE SCALING FUNCTIONS ##
def loadImageSeries(filelist=None):
    "create a list of Image.images for use in montage"
    if filelist is None or len(filelist) < 1:
        return

    imglist = []
    for img in filelist:
        if not os.path.exists(img):
            print("unable to find %s" % img)
            continue
        try:
            im = Image.open(img).convert2byte()
        except:
            if not isSpiderImage(img):
                print(img + " is not a Spider image file")
            continue
        im.info['filename'] = img
        imglist.append(im)
    return imglist

# --------------------------------------------------------------------
# For saving images in Spider format
def show_one_img_mask(data):
    w, h = 1918, 1280
    a = randint(0, 31)
    path = "../input/test"
    data = np.load(data).item()
    name, masks = data['name'][a], data['pred']
    img = Image.open("%s/%s" % (path, name))
    #img.show()
    plt.imshow(img)
    plt.show()
    mask = np.squeeze(masks[a])
    mask = imresize(mask, [h, w]).astype(np.float32)
    print(mask.shape, mask[0])
    img = Image.fromarray(mask * 256)  #.resize([w,h])
    plt.imshow(img)
    plt.show()
def write_tfrecord(self, img_list, label_list, record_path):
    # write a single tfrecord
    if os.path.exists(record_path):
        print("%s exists!" % record_path)
        return

    self._check_list()
    print("write %s" % record_path)
    self._write_info()

    writer = tf.python_io.TFRecordWriter(record_path)
    c = 0
    for imgname, label in zip(img_list, label_list):
        img = Image.open(imgname).resize((self.flags.width, self.flags.height))
        data = np.array(img).astype(np.uint8)
        img, data = self._check_color(img, data)
        example = self._get_example(data, label)
        writer.write(example.SerializeToString())
        c += 1
        if c % LOG_EVERY == 0:
            print("%d images written to tfrecord" % c)
    writer.close()
    print("writing %s done" % record_path)
def get_exif_date_exif(jpegfn):
    """return EXIF datetime using exifread (formerly EXIF)"""
    dt_value = None
    f = open(jpegfn, 'rb')
    try:
        tags = exifread.process_file(f)
        if DEBUG:
            print('tags cnt: %d' % len(tags))
            print('\n'.join(tags))
        for dt_tag in DT_TAGS:
            try:
                dt_value = '%s' % tags[dt_tag]
                if DEBUG:
                    print('%s: %s' % (dt_tag, dt_value))
                break
            except:
                continue
        if dt_value:
            exif_time = exif_info2time(dt_value)
            return exif_time
    finally:
        f.close()
    return None
def createIconGD(file, size=100, raw=True):
    """
    Implements the actual logic behind creating the icon/thumbnail

    :type file: str
    :param file: path to the file name

    :rtype: image
    :return: icon/thumbnail for the video
    """
    image = Image.open(file)
    width, height = image.size

    if width > height:
        y = 0
        x = (width - height) / 2
        smallestSide = height
    else:
        x = 0
        y = (height - width) / 2
        smallestSide = width

    # image_p = Image.new('RGB',(size, size))
    # image = Image.frombytes('RGBa',(size,size),file_get_contents(file))

    image.thumbnail((size, size))

    ##todo convert to jpeg
    i = image.tobytes()
    image.close()
    # image_p.close()
    return i
def plt_to_vis(fig, win, name):
    canvas = fig.canvas
    import io
    buf = io.BytesIO()
    canvas.print_png(buf)
    data = buf.getvalue()
    buf.close()

    buf = io.BytesIO()
    buf.write(data)
    img = Image.open(buf)
    img = np.asarray(img) / 255.0
    img = img.astype(float)[:, :, 0:3]
    img = torch.FloatTensor(img).permute(2, 0, 1)
    vis.image(img,
              win=str(MULTI_RUN) + '-' + win,
              opts=dict(title=str(MULTI_RUN) + '-' + name))
def test_horizontal_image_to_vertical_pdf(self):
    actions = [
        (self.pdf_file.name, 0),
        (PATH_TO_HORIZONTAL_JPG_FILE, 90)
    ]
    self.document_clipper_pdf_writer.merge(PATH_TO_NEW_PDF_FILE, actions)

    new_pdf = open(PATH_TO_NEW_PDF_FILE)
    new_document_clipper_pdf_reader = DocumentClipperPdfReader(new_pdf)
    new_document_clipper_pdf_reader.pdf_to_xml()
    pages = new_document_clipper_pdf_reader.get_pages()
    self.assertEqual(len(pages), 11)

    page_with_image = pages[-1]
    image_width, image_height = new_document_clipper_pdf_reader.get_page_max_dimensions(page_with_image)
    expected_width = 2008.0
    expected_height = 2677.0
    self.assertEqual(image_width, expected_width)
    self.assertEqual(image_height, expected_height)
def get_frames_data(filename, num_frames_per_clip=16):
    '''
    Given a directory containing extracted frames, return a video clip of
    (num_frames_per_clip) consecutive frames as a list of np arrays
    '''
    ret_arr = []
    s_index = 0
    for parent, dirnames, filenames in os.walk(filename):
        if len(filenames) < num_frames_per_clip:
            return [], s_index
        filenames = sorted(filenames)
        s_index = random.randint(0, len(filenames) - num_frames_per_clip)
        for i in range(s_index, s_index + num_frames_per_clip):
            image_name = str(filename) + '/' + str(filenames[i])
            img = Image.open(image_name)
            img_data = np.array(img)
            ret_arr.append(img_data)
    return ret_arr, s_index
def crop(self, file, coordinates):
    """Crop filename and overwrite it."""
    try:
        filename = file.filename
        extension = self.get_filename_extension(filename)
        from io import BytesIO
        m = BytesIO()
        im = Image.open(file)
        target = im.crop(coordinates)
        # target = target.resize(self.size, Image.ANTIALIAS)
        # Scale down the image to Indexed mode
        scale_down_img = target.convert('P', colors=255, palette=Image.ADAPTIVE)
        scale_down_img.save(m, format=extension)
        file.stream = m
        file.stream.seek(0)
        return True
    except:
        return False
def read_kitti_data(image_id, image_path, label_path, stride):
    """Reads groundtruth data from a Kitti annotation file."""
    with open(label_path) as file:
        data = file.read()

    bboxes = []
    lines = data.splitlines()
    for line in lines:
        line = line.split()
        if line[0] in LABEL_IDS:
            bboxes.append(
                BoundingBox(line[4], line[5], line[6], line[7], LABEL_IDS[line[0]]))

    return SSDImage(image_id, image_path, stride, len(LABELS), bboxes=bboxes)
def capture(self, data, term_instance=None):
    """
    Stores *data* as a temporary file and returns that file's object.
    *term_instance* can be used by overrides of this function to make
    adjustments to the terminal emulator after the *data* is captured
    e.g. to make room for an image.
    """
    # Remove the extra \r's that the terminal adds:
    data = data.replace(b'\r\n', b'\n')
    logging.debug("capture() len(data): %s" % len(data))
    # Write the data to disk in a temporary location
    self.file_obj = tempfile.TemporaryFile()
    self.file_obj.write(data)
    self.file_obj.flush()
    # Leave it open
    return self.file_obj
def update_magic(self, filetype, mimetype):
    """
    Replaces an existing FileType with the given *mimetype* in
    :attr:`self.supported_magic` with the given *filetype*. Example::

        >>> import terminal
        >>> term = terminal.Terminal()
        >>> class NewPDF(terminal.PDFile): pass
        >>> # Open PDFs immediately in a new window
        >>> NewPDF.html_template = "<script>window.open({link})</script>"
        >>> NewPDF.html_icon_template = NewPDF.html_template # Ignore icon
        >>> term.update_magic(NewPDF, mimetype="application/pdf")
    """
    # Find the matching magic filetype
    for i, Type in enumerate(self.supported_magic):
        if Type.mimetype == mimetype:
            break
    # Replace self.magic and self.magic_map
    del self.magic[Type.re_header]
    del self.magic_map[Type.re_header]
    self.magic.update({filetype.re_header: filetype.re_capture})
    self.magic_map.update({filetype.re_header: filetype})
    # Finally replace the existing filetype in supported_magic
    self.supported_magic[i] = filetype
def _capture_file(self, ref):
    """
    This function gets called by :meth:`Terminal.write` when the incoming
    character stream matches a value in :attr:`self.magic`. It will call
    whatever function is associated with the matching regex in
    :attr:`self.magic_map`. It also stores the current file capture
    reference (*ref*) at the current cursor location.
    """
    logging.debug("_capture_file(%s)" % repr(ref))
    self.screen[self.cursorY][self.cursorX] = ref
    filetype_instance = self.captured_files[ref]
    filetype_instance.capture(self.capture, self)
    # Start up an open file watcher so leftover file objects get
    # closed when they're no longer being used
    if not self.watcher or not self.watcher.isAlive():
        import threading
        self.watcher = threading.Thread(
            name='watcher', target=self._captured_fd_watcher)
        self.watcher.setDaemon(True)
        self.watcher.start()
    return
def _captured_fd_watcher(self):
    """
    Meant to be run inside of a thread, calls
    :meth:`Terminal.close_captured_fds` until there are no more open image
    file descriptors.
    """
    logging.debug("starting _captured_fd_watcher()")
    import time
    self.quitting = False
    while not self.quitting:
        if self.captured_files:
            self.close_captured_fds()
            time.sleep(5)
        else:
            self.quitting = True
    logging.debug('_captured_fd_watcher() quitting: No more images.')
def loadDepthMap(self, filename):
    """
    Read a depth-map
    :param filename: file name to load
    :return: image data of depth image
    """
    img = Image.open(filename)
    # top 8 bits of depth are packed into green channel and lower 8 bits into blue
    assert len(img.getbands()) == 3
    r, g, b = img.split()
    r = np.asarray(r, np.int32)
    g = np.asarray(g, np.int32)
    b = np.asarray(b, np.int32)
    dpt = np.bitwise_or(np.left_shift(g, 8), b)
    imgdata = np.asarray(dpt, np.float32)
    return imgdata
def test_prepare_video(self):
    vid_returned, size, duration, thumbnail_content = media.prepare_video(
        self.TEST_VIDEO_PATH, aspect_ratios=1.0, max_duration=10,
        save_path='media/output.mp4', save_only=True)
    self.assertEqual(duration, 10.0, 'Invalid duration.')
    self.assertEqual(size[0], size[1], 'Invalid width/length.')
    self.assertTrue(os.path.isfile('media/output.mp4'), 'Output file not generated.')
    self.assertTrue(os.path.isfile(vid_returned), 'Output file not returned.')

    with self.assertRaises(ValueError) as ve:
        media.prepare_video(
            self.TEST_VIDEO_PATH, aspect_ratios=1.0, max_duration=10,
            save_only=True)
    self.assertEqual(str(ve.exception), '"save_path" cannot be empty.')

    self.assertGreater(len(thumbnail_content), 0, 'No thumbnail content returned.')

    # Save video, thumbnail content and verify attributes
    vidclip_output = VideoFileClip('media/output.mp4')
    self.assertAlmostEqual(duration, vidclip_output.duration, places=1)
    self.assertEqual(size[0], vidclip_output.size[0])
    self.assertEqual(size[1], vidclip_output.size[1])

    im = Image.open(io.BytesIO(thumbnail_content))
    self.assertEqual(size[0], im.size[0])
    self.assertEqual(size[1], im.size[1])
def test_prepare_video2(self):
    video_content, size, duration, thumbnail_content = media.prepare_video(
        self.TEST_VIDEO_PATH, max_size=(480, 480), min_size=(0, 0))
    self.assertEqual(duration, self.TEST_VIDEO_DURATION, 'Duration changed.')
    self.assertLessEqual(size[0], 480, 'Invalid width.')
    self.assertLessEqual(size[1], 480, 'Invalid height.')
    self.assertEqual(
        1.0 * size[0] / size[1],
        1.0 * self.TEST_VIDEO_SIZE[0] / self.TEST_VIDEO_SIZE[1],
        'Aspect ratio changed.')
    self.assertGreater(len(video_content), 0, 'No video content returned.')
    self.assertGreater(len(thumbnail_content), 0, 'No thumbnail content returned.')

    # Save video, thumbnail content and verify attributes
    video_output = tempfile.NamedTemporaryFile(prefix='ipae_test_', suffix='.mp4', delete=False)
    video_output.write(video_content)
    video_output.close()

    vidclip_output = VideoFileClip(video_output.name)
    self.assertAlmostEqual(duration, vidclip_output.duration, places=1)
    self.assertEqual(size[0], vidclip_output.size[0])
    self.assertEqual(size[1], vidclip_output.size[1])

    im = Image.open(io.BytesIO(thumbnail_content))
    self.assertEqual(size[0], im.size[0])
    self.assertEqual(size[1], im.size[1])
def test_remote_video(self):
    video_url = 'https://raw.githubusercontent.com/johndyer/mediaelement-files/master/big_buck_bunny.mp4'
    video_content, size, duration, thumbnail_content = media.prepare_video(
        video_url, aspect_ratios=1.0, max_duration=10)
    self.assertEqual(duration, 10.0, 'Invalid duration.')
    self.assertEqual(size[0], size[1], 'Invalid width/length.')
    self.assertGreater(len(video_content), 0, 'No video content returned.')
    self.assertGreater(len(thumbnail_content), 0, 'No thumbnail content returned.')

    # Save video, thumbnail content and verify attributes
    video_output = tempfile.NamedTemporaryFile(prefix='ipae_test_', suffix='.mp4', delete=False)
    video_output.write(video_content)
    video_output.close()

    vidclip_output = VideoFileClip(video_output.name)
    self.assertAlmostEqual(duration, vidclip_output.duration, places=1)
    self.assertEqual(size[0], vidclip_output.size[0])
    self.assertEqual(size[1], vidclip_output.size[1])

    im = Image.open(io.BytesIO(thumbnail_content))
    self.assertEqual(size[0], im.size[0])
    self.assertEqual(size[1], im.size[1])
def get_image(name):
    img = Image.open(name)
    #scipy.misc.imshow(img)
    #print (img.size)
    half_the_width = img.size[0] // 2
    half_the_height = img.size[1] // 2
    img4 = img.crop(
        (
            half_the_width - 64,
            half_the_height - 64,
            half_the_width + 64,
            half_the_height + 64
        )
    )
    #scipy.misc.imshow(img4)
    #img4.save("img4.jpg")
    #scipy.misc.imshow(img4)
    k = scipy.misc.imresize(img4, [64, 64, 3], interp='bicubic')
    #scipy.misc.imshow(k)
    return k
def keep_file(self, response, min_size=None, max_size=None):
    """Decide whether to keep the image

    Compare image size with ``min_size`` and ``max_size`` to decide.

    Args:
        response (Response): response of requests.
        min_size (tuple or None): minimum size of required images.
        max_size (tuple or None): maximum size of required images.

    Returns:
        bool: whether to keep the image.
    """
    try:
        img = Image.open(BytesIO(response.content))
    except (IOError, OSError):
        return False
    if min_size and not self._size_gt(img.size, min_size):
        return False
    if max_size and not self._size_lt(img.size, max_size):
        return False
    return True
def __getitem__(self, index):
    """
    Args:
        index (int): Index

    Returns:
        tuple: Tuple (image, target). target is the object returned by
        ``coco.loadAnns``.
    """
    coco = self.coco
    img_id = self.ids[index]
    ann_ids = coco.getAnnIds(imgIds=img_id)
    target = coco.loadAnns(ann_ids)
    target = torch.unsqueeze(torch.Tensor(target[0]['bbox']), -1)

    path = coco.loadImgs(img_id)[0]['file_name']

    img = Image.open(os.path.join(self.root, path)).convert('RGB')
    if self.transform is not None:
        img = self.transform(img)

    if self.target_transform is not None:
        target = self.target_transform(target)

    return img, target
def update_unicorn(self):
    unicorn.clear()
    global img_file
    log_string('Start Unicorn image loop')
    while img_file:
        img = Image.open(img_file)
        self.draw_unicorn(img)
    else:
        log_string('Something went wrong while picking up the img')
        pass
def test_unicorn(self):
    print('Testing all images in folder {}'.format(self.path))
    for image in os.listdir(self.path):
        if image.endswith(self.icon_extension):
            print('Testing image: {}'.format(self.path + image))
            img = Image.open(self.path + image)
            self.draw_unicorn(img)
        else:
            print('Not using this file, not an image: {}'.format(image))
    unicorn.clear()
    unicorn.show()