We extracted the following 50 code examples from open-source Python projects to illustrate how to use cv2.waitKey().
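Most of the examples below follow the same display loop: show an image or frame with cv2.imshow(), call cv2.waitKey() with a delay in milliseconds (0 or a negative value blocks until a key is pressed), mask the return value with 0xFF, and compare it against a key code such as ord('q') or 27 (Esc). As a minimal sketch of that pattern (the file name 'example.jpg' is a placeholder, not taken from any of the projects below):

import cv2

img = cv2.imread('example.jpg')   # placeholder path; assumed to exist
cv2.imshow('preview', img)        # draw the image in a named window
key = cv2.waitKey(0) & 0xFF       # block until a key press; keep the low byte
if key in (27, ord('q')):         # Esc or 'q'
    print('quit requested')
cv2.destroyAllWindows()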
def get_points():
    # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
    objp = np.zeros((6*8, 3), np.float32)
    objp[:, :2] = np.mgrid[0:8, 0:6].T.reshape(-1, 2)

    # Arrays to store object points and image points from all the images.
    objpoints = []  # 3d points in real world space
    imgpoints = []  # 2d points in image plane.

    # Make a list of calibration images
    images = glob.glob('calibration_wide/GO*.jpg')

    # Step through the list and search for chessboard corners
    for idx, fname in enumerate(images):
        img = cv2.imread(fname)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        # Find the chessboard corners
        ret, corners = cv2.findChessboardCorners(gray, (8, 6), None)

        # If found, add object points, image points
        if ret == True:
            objpoints.append(objp)
            imgpoints.append(corners)

            # Draw and display the corners
            cv2.drawChessboardCorners(img, (8, 6), corners, ret)
            #write_name = 'corners_found'+str(idx)+'.jpg'
            #cv2.imwrite(write_name, img)
            cv2.imshow('img', img)
            cv2.waitKey(500)

    cv2.destroyAllWindows()
    return objpoints, imgpoints
def skin_calib(self, raw_yrb):
    mask_skin = cv2.inRange(raw_yrb, self.mask_lower_yrb, self.mask_upper_yrb)
    cal_skin = cv2.bitwise_and(raw_yrb, raw_yrb, mask=mask_skin)
    cv2.imshow('YRB_calib', cal_skin)
    k = cv2.waitKey(5) & 0xFF
    if k == ord('s'):
        self.calib_switch = False
        cv2.destroyWindow('YRB_calib')

    ymin = cv2.getTrackbarPos('Ymin', 'YRB_calib')
    ymax = cv2.getTrackbarPos('Ymax', 'YRB_calib')
    rmin = cv2.getTrackbarPos('CRmin', 'YRB_calib')
    rmax = cv2.getTrackbarPos('CRmax', 'YRB_calib')
    bmin = cv2.getTrackbarPos('CBmin', 'YRB_calib')
    bmax = cv2.getTrackbarPos('CBmax', 'YRB_calib')
    self.mask_lower_yrb = np.array([ymin, rmin, bmin])
    self.mask_upper_yrb = np.array([ymax, rmax, bmax])

# Do skin detection with some filtering
def test_image(addr):
    target = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral']
    font = cv2.FONT_HERSHEY_SIMPLEX

    im = cv2.imread(addr)
    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(gray, scaleFactor=1.1)

    for (x, y, w, h) in faces:
        cv2.rectangle(im, (x, y), (x+w, y+h), (0, 255, 0), 2, 5)
        face_crop = im[y:y+h, x:x+w]
        face_crop = cv2.resize(face_crop, (48, 48))
        face_crop = cv2.cvtColor(face_crop, cv2.COLOR_BGR2GRAY)
        face_crop = face_crop.astype('float32') / 255
        face_crop = np.asarray(face_crop)
        face_crop = face_crop.reshape(1, 1, face_crop.shape[0], face_crop.shape[1])
        result = target[np.argmax(model.predict(face_crop))]
        cv2.putText(im, result, (x, y), font, 1, (200, 0, 0), 3, cv2.LINE_AA)

    cv2.imshow('result', im)
    cv2.imwrite('result.jpg', im)
    cv2.waitKey(0)
def run(self):
    cv2.namedWindow("display", cv2.WINDOW_NORMAL)
    cv2.setMouseCallback("display", self.opencv_calibration_node.on_mouse)
    cv2.createTrackbar("scale", "display", 0, 100, self.opencv_calibration_node.on_scale)
    if self.extra_queue:
        cv2.namedWindow("extra", cv2.WINDOW_NORMAL)
    while True:
        # wait for an image (could happen at the very beginning when the queue is still empty)
        while len(self.queue) == 0:
            time.sleep(0.1)
        im = self.queue[0]
        cv2.imshow("display", im)
        k = cv2.waitKey(6) & 0xFF
        if k in [27, ord('q')]:
            rospy.signal_shutdown('Quit')
        elif k == ord('s'):
            self.opencv_calibration_node.screendump(im)
        if self.extra_queue:
            if len(self.extra_queue):
                extra_img = self.extra_queue[0]
                cv2.imshow("extra", extra_img)
def run(im):
    im_disp = im.copy()
    window_name = "Draw line here."
    cv2.namedWindow(window_name, cv2.WINDOW_AUTOSIZE)
    cv2.moveWindow(window_name, 910, 0)

    print(" Drag across the screen to set lines.\n Do it twice")
    print(" After drawing the lines press 'r' to resume\n")

    l1 = np.empty((2, 2), np.uint32)
    l2 = np.empty((2, 2), np.uint32)
    list = [l1, l2]

    mouse_down = False

    def callback(event, x, y, flags, param):
        global trigger, mouse_down
        if trigger < 2:
            if event == cv2.EVENT_LBUTTONDOWN:
                mouse_down = True
                list[trigger][0] = (x, y)
            if event == cv2.EVENT_LBUTTONUP and mouse_down:
                mouse_down = False
                list[trigger][1] = (x, y)
                cv2.line(im_disp, (list[trigger][0][0], list[trigger][0][1]),
                         (list[trigger][1][0], list[trigger][1][1]), (255, 0, 0), 2)
                trigger += 1
        else:
            pass

    cv2.setMouseCallback(window_name, callback)
    while True:
        cv2.imshow(window_name, im_disp)
        key = cv2.waitKey(10) & 0xFF
        if key == ord('r'):  # press 'r' to resume and return the drawn lines
            return list
    exit()
def CaptureImage():
    imageName = 'DontCare.jpg'  # Just a random string
    cap = cv2.VideoCapture(0)
    while(True):
        # Capture frame-by-frame
        ret, frame = cap.read()
        #gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # For capture image in monochrome
        rgbImage = frame  # For capture the image in RGB color space

        # Display the resulting frame
        cv2.imshow('Webcam', rgbImage)
        # Wait to press 'q' key for capturing
        if cv2.waitKey(1) & 0xFF == ord('q'):
            # Set the image name to the date it was captured
            imageName = str(time.strftime("%Y_%m_%d_%H_%M")) + '.jpg'
            # Save the image
            cv2.imwrite(imageName, rgbImage)
            break
    # When everything done, release the capture
    cap.release()
    cv2.destroyAllWindows()
    # Returns the captured image's name
    return imageName
def videoize(func, args, src=0, win_name="Cam", delim_wait=1, delim_key=27):
    cap = cv2.VideoCapture(src)
    while(1):
        ret, frame = cap.read()
        # To speed up processing; almost real-time on my PC
        frame = cv2.resize(frame, dsize=None, fx=0.5, fy=0.5)
        frame = cv2.flip(frame, 1)
        out = func(frame, args)
        if out is None:
            continue
        out = cv2.resize(out, dsize=None, fx=1.4, fy=1.4)
        cv2.imshow(win_name, out)
        cv2.moveWindow(win_name, (s_w - out.shape[1]) // 2, (s_h - out.shape[0]) // 2)
        k = cv2.waitKey(delim_wait)
        if k == delim_key:
            cv2.destroyAllWindows()
            cap.release()
            return
def cvCaptureVideo():
    capture = cv2.VideoCapture(0)
    if capture.isOpened() is False:
        raise IOError("IO Error")
    cv2.namedWindow("Capture", cv2.WINDOW_NORMAL)
    while True:
        ret, image = capture.read()
        if ret == False:
            continue
        cv2.imshow("Capture", image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    capture.release()
    cv2.destroyAllWindows()
def run(self):
    print("VIDEO server starts...")
    self.sock.bind(self.ADDR)
    self.sock.listen(1)
    conn, addr = self.sock.accept()
    print("remote VIDEO client successfully connected...")
    data = "".encode("utf-8")
    payload_size = struct.calcsize("L")
    cv2.namedWindow('Remote', cv2.WINDOW_NORMAL)
    while True:
        while len(data) < payload_size:
            data += conn.recv(81920)
        packed_size = data[:payload_size]
        data = data[payload_size:]
        msg_size = struct.unpack("L", packed_size)[0]
        while len(data) < msg_size:
            data += conn.recv(81920)
        zframe_data = data[:msg_size]
        data = data[msg_size:]
        frame_data = zlib.decompress(zframe_data)
        frame = pickle.loads(frame_data)
        cv2.imshow('Remote', frame)
        if cv2.waitKey(1) & 0xFF == 27:
            break
def on_mouse(event, x, y, flags, params):
    # global img
    t = time()
    if event == cv2.EVENT_LBUTTONDOWN:
        print('Start Mouse Position: ' + str(x) + ', ' + str(y))
        sbox = [x, y]
        boxes.append(sbox)
        # print(count)
        # print(sbox)
    elif event == cv2.EVENT_LBUTTONUP:
        print('End Mouse Position: ' + str(x) + ', ' + str(y))
        ebox = [x, y]
        boxes.append(ebox)
        print(boxes)
        crop = img[boxes[-2][1]:boxes[-1][1], boxes[-2][0]:boxes[-1][0]]

        cv2.imshow('crop', crop)
        k = cv2.waitKey(0)
        if ord('r') == k:
            cv2.imwrite('Crop' + str(t) + '.jpg', crop)
            print("Written to file")
def camera_detector(self, cap, wait=10):
    detect_timer = Timer()
    ret, _ = cap.read()

    while ret:
        ret, frame = cap.read()
        detect_timer.tic()
        result = self.detect(frame)
        detect_timer.toc()
        print('Average detecting time: {:.3f}s'.format(detect_timer.average_time))

        self.draw_result(frame, result)
        cv2.imshow('Camera', frame)
        cv2.waitKey(wait)

        ret, frame = cap.read()
def test_uw_rgbd_scene(version='v1'):
    from pybot.vision.image_utils import to_color
    from pybot.vision.imshow_utils import imshow_cv

    v1_directory = '/media/spillai/MRG-HD1/data/rgbd-scenes-v1/'
    v2_directory = '/media/spillai/MRG-HD1/data/rgbd-scenes-v2/rgbd-scenes-v2/'

    if version == 'v1':
        rgbd_data_uw = UWRGBDSceneDataset(version='v1',
                                          directory=os.path.join(v1_directory, 'rgbd-scenes'),
                                          aligned_directory=os.path.join(v1_directory, 'rgbd-scenes-aligned'))
    elif version == 'v2':
        rgbd_data_uw = UWRGBDSceneDataset(version='v2', directory=v2_directory)
    else:
        raise RuntimeError('''Version %s not supported. '''
                           '''Check dataset and choose v1/v2 scene dataset''' % version)

    for f in rgbd_data_uw.iteritems(every_k_frames=5, with_ground_truth=True):
        vis = rgbd_data_uw.annotate(f)
        imshow_cv('frame', np.hstack([f.img, vis]), text='Image')
        imshow_cv('depth', (f.depth / 16).astype(np.uint8), text='Depth')
        cv2.waitKey(100)

    return rgbd_data_uw
def imshow_cv(label, im, block=False, text=None, wait=2):
    vis = im.copy()
    print_status(vis, text=text)
    window_manager.imshow(label, vis)
    ch = cv2.waitKey(0 if block else wait) & 0xFF
    if ch == ord(' '):
        cv2.waitKey(0)
    if ch == ord('v'):
        print('Entering debug mode, image callbacks active')
        while True:
            ch = cv2.waitKey(10) & 0xFF
            if ch == ord('q'):
                print('Exiting debug mode!')
                break
    if ch == ord('s'):
        fn = 'img-%s.png' % time.strftime("%Y-%m-%d-%H-%M-%S")
        print('Saving %s' % fn)
        cv2.imwrite(fn, vis)
    elif ch == 27 or ch == ord('q'):
        sys.exit(1)
def camera_callback(self, msg):
    try:
        self.camera_data = self.cv_bridge.imgmsg_to_cv2(msg, "bgr8")
    except cv_bridge.CvBridgeError:
        return

    gray = cv2.cvtColor(self.camera_data, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (5, 5), 0)
    canny = cv2.Canny(blur, 30, 150)

    cv2.imshow("Robot Camera", canny)
    cv2.waitKey(1)
def bench(folder):
    from os.path import join
    from video_capture.av_file_capture import File_Capture
    cap = File_Capture(join(folder, 'marker-test.mp4'))
    markers = []
    detected_count = 0

    for x in range(500):
        frame = cap.get_frame()
        img = frame.img
        gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        markers = detect_markers_robust(gray_img, 5, prev_markers=markers,
                                        true_detect_every_frame=1, visualize=True)

        draw_markers(img, markers)
        cv2.imshow('Detected Markers', img)

        # for m in markers:
        #     if 'img' in m:
        #         cv2.imshow('id %s' % m['id'], m['img'])
        #         cv2.imshow('otsu %s' % m['id'], m['otsu'])
        if cv2.waitKey(1) == 27:
            break

        detected_count += len(markers)

    print(detected_count)  # 2900, 3042, 3021
def color_quant(input, K, output):
    img = cv2.imread(input)
    Z = img.reshape((-1, 3))

    # convert to np.float32
    Z = np.float32(Z)

    # define criteria, number of clusters(K) and apply kmeans()
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 15, 1.0)
    ret, label, center = cv2.kmeans(Z, K, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)

    # Now convert back into uint8, and make original image
    center = np.uint8(center)
    res = center[label.flatten()]
    res2 = res.reshape((img.shape))

    cv2.imshow('res2', res2)
    cv2.waitKey(0)
    cv2.imwrite(output, res2)
    cv2.destroyAllWindows()
def findpathlist(ed, showPics=True):
    '''generate list of paths'''
    pathlist = []
    w, l = ed.shape
    for x in range(l):
        for y in range(w):
            if ed[y][x]:
                path = runpath(ed, x, y)
                if len(path) > 4:
                    if showPics:
                        cv2.imshow('remaining points', ed)
                        cv2.waitKey(1)
                        Gui.updateGui()
                    pathlist.append(path)
    return pathlist
def run(self):
    stop = False
    _, im = self.cam.read()
    while 1:
        if not self.video_stat.is_drug and not stop:
            _, im = self.cam.read()
            self.video_stat.update(im)
            self.box_saver.save(im, self.video_stat.frame_id, self.video_stat.get_boxes())
        else:
            if self.video_stat.is_drug:
                self.video_stat.draw_update(im, self.label_stat.get_label_name())
        chr = cv2.waitKey(1) & 0xFF
        if chr == ord(' '):  # press space to stop the frame
            stop = not stop
        if chr == 27:  # Esc key to exit
            break
def hdSolidBlock(fn="redHDSolidBlock.jpg", bgr=None):
    '''Generate test images as solid blocks of colour of known size, save to filename fn.'''
    # Create a zero (black) image of HD size with 3 colour dimensions. Colour space assumed BGR by default.
    h = 1080
    w = 1920
    img = np.zeros((h, w, 3), dtype="uint8")
    # Want to set all of the pixels to bgr tuple, default red, 8 bit colour
    if not bgr:
        bgr = [0, 0, 255]
    img[:, :] = bgr
    vw = ImageViewer(img)
    vw.windowShow()
    #cv2.imshow("zeroes", frame)
    #ch = 0xff & cv2.waitKey(10000)
    #cv2.destroyAllWindows()
    cv2.imwrite(fn, img)
def gimpMarkup(self, hints=gimpContours, image="2x2-red-1.jpg", feature="top-left-monitor"):
    r = Rectangle(*hints[image][feature])
    contour = r.asContour()
    cv2.drawContours(self.img, [contour], -1, (0, 255, 0), 5)
    title = self.tgen.next(feature)
    if self.show:
        ImageViewer(self.img).show(window=title, destroy=self.destroy, info=self.info, thumbnailfn=title)
    roi = r.getRoi(self.img)
    self.rois[feature] = roi
    # Histogram the ROI to get the spread of intensities, in each channel and grayscale
    title = '%s-roi.jpg' % feature
    if self.show:
        ImageViewer(roi).show(window=title, destroy=self.destroy, info=self.info, thumbnailfn=title)
    colors = ('b', 'g', 'r')
    for i, col in enumerate(colors):
        hist = cv2.calcHist([roi], [i], None, [256], [0, 256])
        plt.plot(hist, color=col)
        plt.xlim([0, 256])
    #plt.hist(roi.ravel(), 256, [0,256])
    plt.show()
    cmap = ColorMapper(roi)
    cmap.mapit(1)
    title = self.tgen.next('colourMapping')
    if self.show:
        ImageViewer(self.img).show(window=title, destroy=self.destroy, info=self.info, thumbnailfn=title)
    cv2.waitKey()
def do_warp(M, warp):
    warp = cv2.warpPerspective(orig, M, (maxWidth, maxHeight))

    # convert the warped image to grayscale and then adjust
    # the intensity of the pixels to have minimum and maximum
    # values of 0 and 255, respectively
    warp = cv2.cvtColor(warp, cv2.COLOR_BGR2GRAY)
    warp = exposure.rescale_intensity(warp, out_range=(0, 255))

    # the pokemon we want to identify will be in the top-right
    # corner of the warped image -- let's crop this region out
    (h, w) = warp.shape
    (dX, dY) = (int(w * 0.4), int(h * 0.45))
    crop = warp[10:dY, w - dX:w - 10]

    # save the cropped image to file
    cv2.imwrite("cropped.png", crop)

    # show our images
    cv2.imshow("image", image)
    cv2.imshow("edge", edged)
    cv2.imshow("warp", imutils.resize(warp, height=300))
    cv2.imshow("crop", imutils.resize(crop, height=300))
    cv2.waitKey(0)
def show_cut_img(img_name):
    img = cv2.imread(img_name, 0)
    cut_img = cut(img)

    cv2.imshow('cut image', cut_img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

    return cut_img
def show(im, allobj, S, w, h, cellx, celly):
    for obj in allobj:
        a = obj[5] % S
        b = obj[5] // S
        cx = a + obj[1]
        cy = b + obj[2]
        centerx = cx * cellx
        centery = cy * celly
        ww = obj[3]**2 * w
        hh = obj[4]**2 * h
        cv2.rectangle(im,
                      (int(centerx - ww/2), int(centery - hh/2)),
                      (int(centerx + ww/2), int(centery + hh/2)),
                      (0, 0, 255), 2)
    cv2.imshow("result", im)
    cv2.waitKey()
    cv2.destroyAllWindows()
def save_images(self, dirname='dump'):
    import os
    img_no = 1

    # Makes the directory
    if not os.path.exists('./' + dirname):
        os.mkdir(dirname)

    while True:
        self.grab_frame()
        if self.debug:
            cv2.imshow('frame', self.img)

        k = cv2.waitKey(1) & 0xFF
        if k == ord('s'):
            cv2.imwrite(os.path.join(dirname, 'dump_' + str(img_no) + '.jpg'), self.img)
            img_no += 1
        elif k == ord('q'):
            break

    cv2.destroyAllWindows()

# Destructor
def process_video(path_to_video):
    cap = cv2.VideoCapture(path_to_video)  # Load video
    while True:
        ret, frame = cap.read()
        print(frame)
        if ret is False or (cv2.waitKey(30) & 0xff) == 27:
            break  # Exit if the video ended

        mask = np.zeros_like(frame)  # init mask
        contours = find_contours(frame)
        plates, plates_images, mask = find_plate_numbers(frame, contours, mask)

        print("Plate Numbers: %s" % ", ".join(plates))
        processed_frame = cv2.add(frame, mask)  # Apply the mask to image
        cv2.imshow('frame', processed_frame)

    cv2.destroyAllWindows()
    cap.release()

###########################################
# Run The Program #########################
###########################################
def next_batch(self):
    self.count += 1
    # print(self.count)
    start = self.index_in_epoch
    self.index_in_epoch += batch_size // pairs_per_img
    if self.index_in_epoch > self.number:
        self.index_in_epoch = 0
        start = self.index_in_epoch
        self.index_in_epoch += batch_size // pairs_per_img
    end = self.index_in_epoch
    data_batch, label_batch = generate_data(self.img_path_list[start])
    for i in range(start+1, end):
        data, label = generate_data(self.img_path_list[i])  # [4, 2, 128, 128], [4, 1, 8]
        data_batch = np.concatenate((data_batch, data))     # [64, 2, 128, 128]
        label_batch = np.concatenate((label_batch, label))  # [64, 1, 8]
    data_batch = np.array(data_batch).transpose([0, 2, 3, 1])  # (64, 128, 128, 2)
    # cv2.imshow('window2', data_batch[1,:,:,1].squeeze())
    # cv2.waitKey()
    label_batch = np.array(label_batch).squeeze()  # (64, 1, 8)
    return data_batch, label_batch
def next_batch(self):
    start = self.index_in_epoch
    self.index_in_epoch += batch_size // pairs_per_img
    if self.index_in_epoch > self.number:
        self.index_in_epoch = 0
        start = self.index_in_epoch
        self.index_in_epoch += batch_size // pairs_per_img
    end = self.index_in_epoch
    data_batch, label_batch = generate_data(self.img_path_list[start])
    for i in range(start+1, end):
        data, label = generate_data(self.img_path_list[i])
        data_batch = np.concatenate((data_batch, data))
        label_batch = np.concatenate((label_batch, label))
    data_batch = np.array(data_batch).transpose([0, 2, 3, 1])
    # cv2.imshow('window2', data_batch[1,:,:,1].squeeze())
    # cv2.waitKey()
    label_batch = np.array(label_batch).squeeze()
    return data_batch, label_batch
def show_img():
    global face_rect
    # Continuously grab frames from the camera and run face detection on each one
    while True:
        img = cv.QueryFrame(cam)  # grab a frame from the camera
        # resize the frame
        src = cv.CreateImage((img.width, img.height), 8, 3)
        cv.Resize(img, src, cv.CV_INTER_LINEAR)
        # convert to grayscale
        gray = cv.CreateImage((img.width, img.height), 8, 1)
        cv.CvtColor(img, gray, cv.CV_BGR2GRAY)  # RGB to grayscale
        cv.EqualizeHist(gray, gray)  # equalize the histogram
        rects = detect(gray, cascade)  # run the cascade face detector
        face_rect = rects  # remember the detected face rectangles
        draw_rects(src, rects, (0, 255, 0))  # draw the rectangles
        cv.ShowImage('DeepFace Wang_jun_qian', src)
        if cv2.waitKey(5) == 27:  # Esc to exit
            break
    cv2.destroyAllWindows()
def test_minicap():
    from atx.drivers.android_minicap import AndroidDeviceMinicap

    cv2.namedWindow("preview")
    d = AndroidDeviceMinicap()

    while True:
        try:
            h, w = d._screen.shape[:2]
            img = cv2.resize(d._screen, (w // 2, h // 2))
            cv2.imshow('preview', img)
            key = cv2.waitKey(1)
            if key == 100:  # 'd' for dump
                filename = time.strftime('%Y%m%d%H%M%S.png')
                cv2.imwrite(filename, d._screen)
        except KeyboardInterrupt:
            break
    cv2.destroyWindow('preview')
def test_grid():
    m = StupidMonkey({'touch': 10})
    poss = []
    while True:
        pos = m.get_touch_point()
        if not pos:
            break
        poss.append(pos)
    print('grid point count:', len(poss))

    import cv2
    import numpy
    img = numpy.zeros((1920, 1080))
    for x, y in poss:
        img[x, y] = 255
    img = cv2.resize(img, (540, 960))
    cv2.imshow('grid', img)
    cv2.waitKey()
def main(serial=None, host=None, port=None):
    d = atx.connect(serial, host=host, port=port)
    while True:
        pilimg = d.screenshot()
        cv2img = imutils.from_pillow(pilimg)
        # cv2img = cv2.imread('tmp.png')
        # cv2.imwrite('tmp.png', cv2img)
        cv2img = cv2.resize(cv2img, fx=0.5, fy=0.5, dsize=(0, 0))
        pt = choose_point(cv2img)
        print('click:', pt)
        if pt:
            x, y = pt
            d.click(2*x, 2*y)
        cv2.waitKey(100)
        # import time
        # time.sleep(0.1)
def draw_boxes(im, bboxes, is_display=True, color=None, caption="Image", wait=True):
    """
    boxes: bounding boxes
    """
    im = im.copy()
    for box in bboxes:
        if color is None:
            if len(box) == 5 or len(box) == 9:
                c = tuple(cm.jet([box[-1]])[0, 2::-1] * 255)
            else:
                c = tuple(np.random.randint(0, 256, 3))
        else:
            c = color
        cv2.rectangle(im, tuple(box[:2]), tuple(box[2:4]), c)
    if is_display:
        cv2.imshow(caption, im)
        if wait:
            cv2.waitKey(0)
    return im
def plot_conf_mat(densmap_name):
    fig = plt.figure(figsize=(20, 20))
    plt.clf()
    ax = fig.add_subplot(111)
    #ax.set_aspect(1)
    densmap = np.fromfile(densmap_name, np.float32)
    densmap = densmap.reshape(227, 227)
    densmap *= 100
    densmap[densmap > 1] = 1
    res = ax.imshow(densmap, cmap=plt.cm.jet, interpolation='nearest')
    plt.savefig('density.jpg')
    img = cv2.imread("density.jpg")
    img = cv2.resize(img, (227, 227))
    cv2.imshow("i", img)
    cv2.waitKey(0)
    #plt.show()
def pick_corrs(images, n_pts_to_pick=4):
    data = [[[], 0, False, False, False, image, "Image %d" % i, n_pts_to_pick]
            for i, image in enumerate(images)]

    for d in data:
        win_name = d[6]
        cv2.namedWindow(win_name)
        cv2.setMouseCallback(win_name, corr_picker_callback, d)
        cv2.startWindowThread()
        cv2.imshow(win_name, d[5])

    key = None
    while key != '\n' and key != '\r' and key != 'q':
        key = cv2.waitKey(33)
        key = chr(key & 255) if key >= 0 else None

    cv2.destroyAllWindows()

    if key == 'q':
        return None
    else:
        return [d[0] for d in data]
def detect(imgfile):
    origimg = cv2.imread(imgfile)
    img = preprocess(origimg)

    img = img.astype(np.float32)
    img = img.transpose((2, 0, 1))

    net.blobs['data'].data[...] = img
    out = net.forward()
    box, conf, cls = postprocess(origimg, out)

    for i in range(len(box)):
        p1 = (box[i][0], box[i][1])
        p2 = (box[i][2], box[i][3])
        cv2.rectangle(origimg, p1, p2, (0, 255, 0))
        p3 = (max(p1[0], 15), max(p1[1], 15))
        title = "%s:%.2f" % (CLASSES[int(cls[i])], conf[i])
        cv2.putText(origimg, title, p3, cv2.FONT_ITALIC, 0.6, (0, 255, 0), 1)
    cv2.imshow("SSD", origimg)

    k = cv2.waitKey(0) & 0xff
    # Exit if ESC pressed
    if k == 27:
        return False
    return True
def main(args):
    saveFace = None
    cap = cv2.VideoCapture(0)
    face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_alt.xml')
    while(True):
        # Capture frame-by-frame
        ret, frame = cap.read()
        faces = face_cascade.detectMultiScale(frame, 1.3, 5)
        if len(faces) > 0:
            saveFace = frame
            break
        # Display the resulting frame
        cv2.imshow('frame', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    # When everything done, release the capture
    cap.release()
    cv2.destroyAllWindows()

    cv2.imwrite('C:/Users/USER/Desktop/facenet-RealTime/src/face_data/saveFace.jpg', frame)

    mypath = 'C:/Users/USER/Desktop/facenet-RealTime/src/face_data'
    onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]
    myImage = []
    for file in onlyfiles:
        isImage = None
        file = mypath + '/' + file
        isImage = imghdr.what(file)
        if isImage != None:
            myImage.append(file)

    # begin facenet
    cp.main(args, myImage)
def evaluate(img_col, args):
    numpy.seterr(all='ignore')
    assert isinstance(img_col, numpy.ndarray), 'img_col must be a numpy array'
    assert img_col.ndim == 3, 'img_col must be a color image ({0} dimensions currently)'.format(img_col.ndim)
    assert isinstance(args, argparse.Namespace), 'args must be of type argparse.Namespace not {0}'.format(type(args))

    img_gry = cv2.cvtColor(img_col, cv2.COLOR_RGB2GRAY)
    rows, cols = img_gry.shape
    crow, ccol = rows // 2, cols // 2
    f = numpy.fft.fft2(img_gry)
    fshift = numpy.fft.fftshift(f)
    fshift[crow-75:crow+75, ccol-75:ccol+75] = 0
    f_ishift = numpy.fft.ifftshift(fshift)
    img_fft = numpy.fft.ifft2(f_ishift)
    img_fft = 20 * numpy.log(numpy.abs(img_fft))

    if args.display and not args.testing:
        cv2.destroyAllWindows()
        scripts.display('img_fft', img_fft)
        scripts.display('img_col', img_col)
        cv2.waitKey(0)

    result = numpy.mean(img_fft)
    return img_fft, result, result < args.thresh
def visualize_image(image, name="Image", resize=False, save_image=False, path=None):
    """Helper function to visualize and save any image"""
    image = image.reshape([IMAGE_WIDTH, IMAGE_HEIGHT])
    image = image.astype(np.uint8)
    if resize:
        image = cv2.resize(image, (IMAGE_WIDTH * 10, IMAGE_HEIGHT * 10))
    cv2.imshow(name, image)
    if cv2.waitKey(0) & 0xFF == ord('q'):
        cv2.destroyAllWindows()
    if save_image:
        assert path is not None
        cv2.imwrite(path, image)
def image_preview(image):
    cv2.imshow('Image preview', image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def display_solution(square_borders, start_grid, solution, image):
    """ Writes the solution to an image and displays said image.

    Params:
        square_borders -- A list containing the borders of all squares
        start_grid     -- A list containing the sudoku starting values
        solution       -- A list containing the sudoku solution
        image          -- The image to write to """
    cur_row = 0
    cur_col = 0
    for i, b in enumerate(square_borders):
        x, y, x2, y2 = b  # Tuple unpacking
        # Calculate bottom-left position for text
        text_x, text_y = ((x2+x) // 2) - 10, ((y2+y) // 2) + 10
        # Bottom-left corner for text position
        org = (text_x, text_y)
        # Only write text if the position was not set in the start_grid
        if start_grid[cur_row][cur_col] == 0:
            value = str(solution[cur_row][cur_col])
            cv2.putText(img=image, text=value, org=org,
                        fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1,
                        color=(0, 255, 0), thickness=2)
        cur_col += 1
        if cur_col % 9 == 0:
            cur_row += 1
            cur_col = 0

    cv2.imshow('Solution', image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def process_image(image):
    array_image = numpy.array(image)
    array_image = cv2.resize(array_image, (0, 0), fx=2, fy=2)
    #cv2.imshow('image', array_image)
    #cv2.waitKey(0)
    image = PIL.Image.fromarray(array_image)
    return image
def online_receive_frame(viewer_state, episode):
    logger.info('Waiting for message...')
    if not viewer_state.conn.poll(1000.0):
        raise EnvironmentError("Failed to receive message!")
    msg = viewer_state.conn.recv()

    if isinstance(msg, frame_module.Frame):
        viewer_state.current_frame = msg
        episode.frames.append(viewer_state.current_frame)
    elif msg['msg'] == 'init':
        viewer_state.prev_frame = msg
        viewer_state.action_set = msg['action_set']
        viewer_state.episode_num_in_session += 1
        print('Initial message received')
        print('... got action_set: {}'.format(viewer_state.action_set))
        print('... episode_num: {}'.format(viewer_state.episode_num_in_session))
        viewer_state.env_id = msg['env_id']
        viewer_state.skip_frame = True
    elif msg['msg'] == 'close':
        viewer_state.conn.close()
        viewer_state.close = True
    elif msg['msg'] == 'done':
        viewer_state.frame_index = 0
        proceed = input('Proceed to next episode?')
        if proceed != 'y':
            viewer_state.EXIT = True
        while True:
            k = cv2.waitKey(viewer_state.delay) & 0xFF
            if k == 255:
                break
        viewer_state.skip_episode = True
    else:
        print('Unknown message received: {}'.format(msg))
        viewer_state.skip_frame = True
    return viewer_state, episode
def test(self):
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        self.saver_z.restore(sess, self.encode_z_model)
        self.saver_y.restore(sess, self.encode_y_model)

        realbatch_array, _ = MnistData.getNextBatch(self.ds_train, self.label_y, 0, 50, self.batch_size)
        output_image, label_y = sess.run([self.fake_images, self.e_y],
                                         feed_dict={self.images: realbatch_array})
        # one-hot
        # label_y = tf.arg_max(label_y, 1)
        print(label_y)

        save_images(output_image, [8, 8], './{}/test{:02d}_{:04d}.png'.format(self.sample_path, 0, 0))
        save_images(realbatch_array, [8, 8], './{}/test{:02d}_{:04d}_r.png'.format(self.sample_path, 0, 0))

        gen_img = cv2.imread('./{}/test{:02d}_{:04d}.png'.format(self.sample_path, 0, 0), 0)
        real_img = cv2.imread('./{}/test{:02d}_{:04d}_r.png'.format(self.sample_path, 0, 0), 0)
        cv2.imshow("test_EGan", gen_img)
        cv2.imshow("Real_Image", real_img)
        cv2.waitKey(-1)

        print("Test finish!")
def image_detector(self, imname, wait=0):
    detect_timer = Timer()
    image = cv2.imread(imname)

    detect_timer.tic()
    result = self.detect(image)
    detect_timer.toc()
    print('Average detecting time: {:.3f}s'.format(detect_timer.average_time))

    self.draw_result(image, result)
    cv2.imshow('Image', image)
    cv2.waitKey(wait)
def getFrame(self):
    ret = False
    count = 0
    while not ret:
        # Capture frame-by-frame
        count = count + 1
        ret, frame = self.cap.read()
        if cv2.waitKey(1) & 0xFF == ord('q'):
            return (False, None)
        if count > 10:
            return (False, None)
    return (True, frame)
def showFrame(self, frame):
    # Display the resulting frame
    cv2.imshow(self.frame_name, frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        return False
    else:
        return True