The following 50 code examples, extracted from open-source Python projects, illustrate how to use cv2.destroyWindow().
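Before the project-specific snippets, here is a minimal, hedged sketch of the usual pattern around cv2.destroyWindow(): create a named window, show something in it, wait for a key, then destroy that one window by name. The file name and window title below are placeholders, not taken from any of the projects.

import cv2

# Minimal sketch of the typical cv2.destroyWindow() workflow.
# 'example.jpg' and the window title 'demo' are placeholder names.
img = cv2.imread('example.jpg')
cv2.namedWindow('demo')           # create a window named 'demo'
cv2.imshow('demo', img)           # display the image in that window
cv2.waitKey(0)                    # block until any key is pressed
cv2.destroyWindow('demo')         # close only the 'demo' window; other windows stay open
cv2.waitKey(1)                    # let the GUI event loop process the close request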
def skin_calib(self, raw_yrb):
    mask_skin = cv2.inRange(raw_yrb, self.mask_lower_yrb, self.mask_upper_yrb)
    cal_skin = cv2.bitwise_and(raw_yrb, raw_yrb, mask=mask_skin)
    cv2.imshow('YRB_calib', cal_skin)
    k = cv2.waitKey(5) & 0xFF
    if k == ord('s'):
        self.calib_switch = False
        cv2.destroyWindow('YRB_calib')

    ymin = cv2.getTrackbarPos('Ymin', 'YRB_calib')
    ymax = cv2.getTrackbarPos('Ymax', 'YRB_calib')
    rmin = cv2.getTrackbarPos('CRmin', 'YRB_calib')
    rmax = cv2.getTrackbarPos('CRmax', 'YRB_calib')
    bmin = cv2.getTrackbarPos('CBmin', 'YRB_calib')
    bmax = cv2.getTrackbarPos('CBmax', 'YRB_calib')
    self.mask_lower_yrb = np.array([ymin, rmin, bmin])
    self.mask_upper_yrb = np.array([ymax, rmax, bmax])

# Do skin detection with some filtering
def test_minicap():
    from atx.drivers.android_minicap import AndroidDeviceMinicap

    cv2.namedWindow("preview")
    d = AndroidDeviceMinicap()

    while True:
        try:
            h, w = d._screen.shape[:2]
            img = cv2.resize(d._screen, (w/2, h/2))
            cv2.imshow('preview', img)
            key = cv2.waitKey(1)
            if key == 100:  # d for dump
                filename = time.strftime('%Y%m%d%H%M%S.png')
                cv2.imwrite(filename, d._screen)
        except KeyboardInterrupt:
            break
    cv2.destroyWindow('preview')
def _showloop(self):
    while cv2.waitKey(10) not in [keycode.ESCAPE, keycode.Q, keycode.q]:
        image = self.capture.read()
        image = image.transpose(Image.FLIP_LEFT_RIGHT)
        image = _resize_image(image, self.size)

        array = np.asarray(image)
        array = _mount_roi(array, self.roi, color=(74, 20, 140), thickness=2)
        crop = _crop_array(array, self.roi)

        # process image for any gestures
        if self.verbose:
            segments, event = spockpy.detect(crop, verbose=self.verbose)
        else:
            event = spockpy.detect(crop, verbose=self.verbose)

        self.image = Image.fromarray(segments)
        self.event = event

        cv2.imshow(HoverPad.TITLE, array)

    cv2.destroyWindow(HoverPad.TITLE)
def load_images(queue: PriorityQueue, source: int, file_path: str,
                target_width: int, target_height: int,
                display_progress: bool=False):
    window = 'image'
    if display_progress:
        cv2.namedWindow(window)

    for file in iglob(path.join(file_path, '**', '*.jpg'), recursive=True):
        buffer = cv2.imread(file)
        buffer = cv2.resize(buffer, (target_width, target_height), interpolation=cv2.INTER_AREA)

        random_priority = random()
        queue.put((random_priority, (buffer, source)))

        if display_progress:
            cv2.imshow(window, buffer)
            if (cv2.waitKey(33) & 0xff) == 27:
                break

    if display_progress:
        cv2.destroyWindow(window)
def __init__(self, image, filter_size=1, threshold1=0, threshold2=0):
    self.image = image
    self._filter_size = filter_size
    self._threshold1 = threshold1
    self._threshold2 = threshold2

    def onchangeThreshold1(pos):
        self._threshold1 = pos
        self._render()

    def onchangeThreshold2(pos):
        self._threshold2 = pos
        self._render()

    def onchangeFilterSize(pos):
        self._filter_size = pos
        self._filter_size += (self._filter_size + 1) % 2
        self._render()

    cv2.namedWindow('edges')

    cv2.createTrackbar('threshold1', 'edges', self._threshold1, 255, onchangeThreshold1)
    cv2.createTrackbar('threshold2', 'edges', self._threshold2, 255, onchangeThreshold2)
    cv2.createTrackbar('filter_size', 'edges', self._filter_size, 20, onchangeFilterSize)

    self._render()

    print "Adjust the parameters as desired. Hit any key to close."

    cv2.waitKey(0)

    cv2.destroyWindow('edges')
    cv2.destroyWindow('smoothed')
def display(win_name, img):
    cv2.namedWindow(win_name, cv2.WINDOW_NORMAL)
    cv2.imshow(win_name, numpy.asarray(img[:, :]))
    k = -1
    while k == -1:
        k = waitkey()
    cv2.destroyWindow(win_name)
    if k in [27, ord('q')]:
        rospy.signal_shutdown('Quit')
def run(self): print ("VEDIO client starts...") while True: try: self.sock.connect(self.ADDR) break except: time.sleep(3) continue print ("video client <-> remote server success connected...") check = "F" check = self.sock.recv(1) if check.decode("utf-8") != "S": return print ("receive authend") #self.cap = cv2.VideoCapture(0) self.cap = cv2.VideoCapture("test.mp4") if self.showme: cv2.namedWindow('You', cv2.WINDOW_NORMAL) print ("remote VEDIO client connected...") while self.cap.isOpened(): ret, frame = self.cap.read() if self.showme: cv2.imshow('You', frame) if cv2.waitKey(1) & 0xFF == 27: self.showme = False cv2.destroyWindow('You') if self.level > 0: frame = cv2.resize(frame, (0,0), fx=self.fx, fy=self.fx) data = pickle.dumps(frame) zdata = zlib.compress(data, zlib.Z_BEST_COMPRESSION) try: self.sock.sendall(struct.pack("L", len(zdata)) + zdata) print("video send ", len(zdata)) except: break for i in range(self.interval): self.cap.read()
def run(self):
    while True:
        try:
            self.sock.connect(self.ADDR)
            break
        except:
            time.sleep(3)
            continue
    if self.showme:
        cv2.namedWindow('You', cv2.WINDOW_NORMAL)
    print("VEDIO client connected...")
    while self.cap.isOpened():
        ret, frame = self.cap.read()
        if self.showme:
            cv2.imshow('You', frame)
            if cv2.waitKey(1) & 0xFF == 27:
                self.showme = False
                cv2.destroyWindow('You')
        sframe = cv2.resize(frame, (0, 0), fx=self.fx, fy=self.fx)
        data = pickle.dumps(sframe)
        zdata = zlib.compress(data, zlib.Z_BEST_COMPRESSION)
        try:
            self.sock.sendall(struct.pack("L", len(zdata)) + zdata)
        except:
            break
        for i in range(self.interval):
            self.cap.read()
def __init__(self, img, squares, all=True):
    #w = ImageViewer(img)
    square_contours = [square.contour for square in squares]
    best_contours = []
    best_contour = classify_monitor_contour_set(square_contours)
    best_contours.append(best_contour.astype('int32'))
    print('Iterate over %d contours' % len(square_contours))
    if all:
        cycle = True
        while (cycle):
            for (i, c) in enumerate(square_contours):
                src = img.copy()
                cv2.drawContours(src, best_contours, -1, (0, 0, 255), 3)
                cv2.drawContours(src, square_contours, i, (0, 255, 0), 1)
                print('contour %d overlaid on basic image' % i)
                cv2.imshow('view', src)
                time.sleep(0.2)
                k = cv2.waitKey(30) & 0xFF
                if k == 27:
                    cycle = False
    else:
        cycle = True
        src = img.copy()
        while (cycle):
            cv2.drawContours(src, best_contours, -1, (0, 0, 255), 3)
            cv2.imshow('view', src)
            time.sleep(0.2)
            k = cv2.waitKey(30) & 0xFF
            if k == 27:
                cycle = False
    cv2.destroyWindow('view')
def __init__(self, img, squares, all=True):
    #w = ImageViewer(img)
    square_contours = [square.contour for square in squares]
    #pdb.set_trace()
    best_contours_tuples = classify_multi_monitors_contour_set(square_contours)
    best_contours = [contour.astype('int32') for (contour, index) in best_contours_tuples]
    #pdb.set_trace()
    #print('Iterate over %d contours' % len(square_contours))
    if all:
        cycle = True
        while (cycle):
            for (i, c) in enumerate(square_contours):
                src = img.copy()
                cv2.drawContours(src, square_contours, i, (0, 255, 0), 1)
                cv2.drawContours(src, best_contours, -1, (0, 0, 255), 3)
                print('contour %d overlaid on basic image' % i)
                cv2.imshow('view', src)
                time.sleep(0.2)
                k = cv2.waitKey(30) & 0xFF
                if k == 27:
                    cycle = False
    else:
        cycle = True
        src = img.copy()
        while (cycle):
            cv2.drawContours(src, best_contours, -1, (0, 0, 255), 3)
            cv2.imshow('view', src)
            time.sleep(0.2)
            k = cv2.waitKey(30) & 0xFF
            if k == 27:
                cycle = False
    cv2.destroyWindow('view')

#####################################################################################################################
# Contours and Sets of Contours : various problems in computer vision relevant to the project.
#
# All "Heuristics" functions have the same signature ; img, cnts, *args, **kwargs
#####################################################################################################################
def closeWindow(win="video"):
    cv2.destroyWindow(win)
    for i in range(4):
        cv2.waitKey(1)
def testModel(self):
    """
    This method is to test the trained classifier

    read all images from testing path
    use BOVHelpers.predict() function to obtain classes of each image

    """
    self.testImages, self.testImageCount = self.file_helper.getFiles(self.test_path)

    predictions = []

    for word, imlist in self.testImages.iteritems():
        print "processing ", word
        for im in imlist:
            cl = self.recognize(im)
            predictions.append({
                'image': im,
                'class': cl,
                'object_name': self.name_dict[str(int(cl[0]))]
            })

    print predictions
    for each in predictions:
        # cv2.imshow(each['object_name'], each['image'])
        # cv2.waitKey()
        # cv2.destroyWindow(each['object_name'])
        #
        plt.imshow(cv2.cvtColor(each['image'], cv2.COLOR_GRAY2RGB))
        plt.title(each['object_name'])
        plt.show()
def test_features():
    from atx.drivers.android_minicap import AndroidDeviceMinicap

    cv2.namedWindow("preview")
    d = AndroidDeviceMinicap()

    # r, h, c, w = 200, 100, 200, 100
    # track_window = (c, r, w, h)
    # oldimg = cv2.imread('base1.png')
    # roi = oldimg[r:r+h, c:c+w]
    # hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
    # mask = cv2.inRange(hsv_roi, 0, 255)
    # roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0,180])
    # cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
    # term_cirt = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)

    while True:
        try:
            w, h = d._screen.shape[:2]
            img = cv2.resize(d._screen, (h/2, w/2))
            cv2.imshow('preview', img)
            hist = cv2.calcHist([img], [0], None, [256], [0, 256])
            plt.plot(plt.hist(hist.ravel(), 256))
            plt.show()
            # if img.shape == oldimg.shape:
            #     # hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
            #     # ret, track_window = cv2.meanShift(hsv, track_window, term_cirt)
            #     # x, y, w, h = track_window
            #     cv2.rectangle(img, (x, y), (x+w, y+h), 255, 2)
            #     cv2.imshow('preview', img)
            # # cv2.imshow('preview', img)
            cv2.waitKey(1)
        except KeyboardInterrupt:
            break

    cv2.destroyWindow('preview')
def screen_simple(host, port, serial, scale=0.5):
    adb = get_adb(host, port, serial)
    img = adb.screenshot_cv2()
    while img is None:
        time.sleep(1)
        img = adb.screenshot_cv2()

    print 'Press Ctrl-C or Esc to quit.'

    winname = 'Sync Screen'
    cv2.namedWindow(winname)
    while True:
        try:
            img = adb.screenshot_cv2()
            if scale != 1.0:
                h, w = img.shape[:2]
                h, w = int(scale*h), int(scale*w)
                img = cv2.resize(img, (w, h))
            cv2.imshow(winname, img)
            key = cv2.waitKey(10)
            if key == 27:  # Escape
                break
        except KeyboardInterrupt:
            print 'Done'
            break
        except:
            traceback.print_exc()
            break
    cv2.destroyWindow(winname)
def filter_frame_manually(self):
    display_image = self.frame
    cv2.imshow("frame of video {0:s}".format(self.name), display_image)
    key = cv2.waitKey(0) & 0xFF
    add_corners = (key == ord('a'))
    cv2.destroyWindow("frame")
    return add_corners, key
def filter_frame_manually(self):
    display_image = np.hstack([video.frame for video in self.videos])
    cv2.imshow("frame", display_image)
    key = cv2.waitKey(0) & 0xFF
    add_corners = (key == ord('a'))
    cv2.destroyWindow("frame")
    return add_corners, key
def selectArea(self):
    self.userInteraction = True
    cv2.namedWindow(self.selectionWindow)
    cv2.setMouseCallback(self.selectionWindow, self.mouseInteraction)
    self.workingFrame = self.processedFrame.copy()
    self.showFrame(self.selectionWindow, self.workingFrame)
    while True:
        key = cv2.waitKey(1) & 0xFF
        if key == ord('q'):
            self.undoFrames = []
            break
        elif key == ord('c'):
            self.workingFrame = self.processedFrame.copy()
            self.trackedAreasList = []
            self.undoFrames = []
            self.showFrame(self.selectionWindow, self.workingFrame)
        elif key == ord('l'):
            try:
                self.trackedAreasList.pop()
            except IndexError:
                pass
            else:
                self.workingFrame = self.undoFrames.pop()
                self.showFrame(self.selectionWindow, self.workingFrame)
        elif key == ord('t'):
            self.undoFrames = []
            self.trackArea = self.refPt
            self.tracking = True
            self.trackDump = []
            if self.pause is True:
                self.pause = False
            break
        elif key == ord('h'):
            self.showHelp('select')
    cv2.destroyWindow(self.selectionWindow)
    self.userInteraction = False
def toggle_debug_mode(self):
    self.debug_mode = not self.debug_mode
    if not self.debug_mode:
        cv2.destroyWindow(DEBUG_WINDOW)
def release(self):
    self.is_stopped = True
    time.sleep(1)

    # get rid of video stream window
    if self.play_video:
        cv2.destroyWindow('live')

    # Release video capture
    self.cap.release()
def show_webcam_and_run(model, emoticons, window_size=None, window_name='webcam', update_time=10):
    """
    Shows webcam image, detects faces and its emotions in real time and draw emoticons over those faces.
    :param model: Learnt emotion detection model.
    :param emoticons: List of emotions images.
    :param window_size: Size of webcam image window.
    :param window_name: Name of webcam image window.
    :param update_time: Image update time interval.
    """
    cv2.namedWindow(window_name, WINDOW_NORMAL)
    if window_size:
        width, height = window_size
        cv2.resizeWindow(window_name, width, height)

    vc = cv2.VideoCapture(0)
    if vc.isOpened():
        read_value, webcam_image = vc.read()
    else:
        print("webcam not found")
        return

    while read_value:
        for normalized_face, (x, y, w, h) in find_faces(webcam_image):
            prediction = model.predict(normalized_face)  # do prediction
            if cv2.__version__ != '3.1.0':
                prediction = prediction[0]

            image_to_draw = emoticons[prediction]
            draw_with_alpha(webcam_image, image_to_draw, (x, y, w, h))

        cv2.imshow(window_name, webcam_image)
        read_value, webcam_image = vc.read()
        key = cv2.waitKey(update_time)

        if key == 27:  # exit on ESC
            break

    cv2.destroyWindow(window_name)
def calibrateColor(color, def_range):
    global kernel
    name = 'Calibrate ' + color
    cv2.namedWindow(name)
    cv2.createTrackbar('Hue', name, 0, 180, nothing)
    cv2.createTrackbar('Sat', name, 0, 255, nothing)
    cv2.createTrackbar('Val', name, 0, 255, nothing)
    while(1):
        ret, frameinv = cap.read()
        frame = cv2.flip(frameinv, 1)
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        hue = cv2.getTrackbarPos('Hue', name)
        sat = cv2.getTrackbarPos('Sat', name)
        val = cv2.getTrackbarPos('Val', name)
        lower = np.array([hue-20, sat, val])
        upper = np.array([hue+20, 255, 255])
        mask = cv2.inRange(hsv, lower, upper)
        eroded = cv2.erode(mask, kernel, iterations=1)
        dilated = cv2.dilate(eroded, kernel, iterations=1)
        cv2.imshow(name, dilated)
        k = cv2.waitKey(5) & 0xFF
        if k == ord(' '):
            cv2.destroyWindow(name)
            return np.array([[hue-20, sat, val], [hue+20, 255, 255]])
        elif k == ord('d'):
            cv2.destroyWindow(name)
            return def_range
def calibrateColor(color, def_range):
    global kernel
    name = 'Calibrate ' + color
    cv2.namedWindow(name)
    cv2.createTrackbar('Hue', name, def_range[0][0]+20, 180, nothing)
    cv2.createTrackbar('Sat', name, def_range[0][1], 255, nothing)
    cv2.createTrackbar('Val', name, def_range[0][2], 255, nothing)
    while(1):
        ret, frameinv = cap.read()
        frame = cv2.flip(frameinv, 1)
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        hue = cv2.getTrackbarPos('Hue', name)
        sat = cv2.getTrackbarPos('Sat', name)
        val = cv2.getTrackbarPos('Val', name)
        lower = np.array([hue-20, sat, val])
        upper = np.array([hue+20, 255, 255])
        mask = cv2.inRange(hsv, lower, upper)
        eroded = cv2.erode(mask, kernel, iterations=1)
        dilated = cv2.dilate(eroded, kernel, iterations=1)
        cv2.imshow(name, dilated)
        k = cv2.waitKey(5) & 0xFF
        if k == ord(' '):
            cv2.destroyWindow(name)
            return np.array([[hue-20, sat, val], [hue+20, 255, 255]])
        elif k == ord('d'):
            cv2.destroyWindow(name)
            return def_range
def calibrateColor(color, def_range):
    global kernel
    name = 'Calibrate ' + color
    cv2.namedWindow(name)
    cv2.createTrackbar('Hue', name, def_range[0][0]+20, 180, nothing)
    cv2.createTrackbar('Sat', name, def_range[0][1], 255, nothing)
    cv2.createTrackbar('Val', name, def_range[0][2], 255, nothing)
    while(1):
        ret, frameinv = cap.read()
        frame = cv2.flip(frameinv, 1)
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        hue = cv2.getTrackbarPos('Hue', name)
        sat = cv2.getTrackbarPos('Sat', name)
        val = cv2.getTrackbarPos('Val', name)
        lower = np.array([hue-20, sat, val])
        upper = np.array([hue+20, 255, 255])
        mask = cv2.inRange(hsv, lower, upper)
        eroded = cv2.erode(mask, kernel, iterations=1)
        dilated = cv2.dilate(eroded, kernel, iterations=1)
        cv2.imshow(name, dilated)
        k = cv2.waitKey(5) & 0xFF
        if k == ord(' '):
            cv2.destroyWindow(name)
            return np.array([[hue-20, sat, val], [hue+20, 255, 255]])
def select_roi(self):
    """Prompt user for a region of interest.

    Args:
        None.

    Returns:
        ROI (tuple): selected ROI coordinates.
    """
    ROI = cv2.selectROI('Select region of interest...', self.current_frame, False, False)
    cv2.destroyWindow('Select region of interest...')
    return ROI
def realtime():
    # initialize preview
    cv2.namedWindow("preview")
    vc = cv2.VideoCapture(0)

    if vc.isOpened():  # get the first frame
        rval, frame = vc.read()
    else:
        rval = False

    classes = ["peace", "punch", "stop", "thumbs_up"]

    while rval:
        frame = cv2.flip(frame, 1)
        cv2.rectangle(frame, (300, 200), (500, 400), (0, 255, 0), 1)
        cv2.putText(frame, "Place your hand in the green box.", (50, 50), cv2.FONT_HERSHEY_PLAIN, 1, 255)
        cv2.putText(frame, "Press esc to exit.", (50, 100), cv2.FONT_HERSHEY_PLAIN, 1, 255)
        cv2.imshow("preview", frame)
        frame = frame[200:400, 300:500]
        #frame = cv2.resize(frame, (200,200))
        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        frame = frame.reshape((1,) + frame.shape)
        frame = frame.reshape(frame.shape + (1,))
        test_datagen = ImageDataGenerator(rescale=1./255)
        m = test_datagen.flow(frame, batch_size=1)
        y_pred = model.predict_generator(m, 1)
        histarray2 = {'PEACE': y_pred[0][0], 'PUNCH': y_pred[0][1], 'STOP': y_pred[0][2], 'Thumbs Up': y_pred[0][3]}
        update(histarray2)
        print(classes[list(y_pred[0]).index(y_pred[0].max())])
        rval, frame = vc.read()
        key = cv2.waitKey(20)
        if key == 27:  # exit on ESC
            break
    cv2.destroyWindow("preview")
    vc = None

# loading the model
def destroy(self):
    cv2.destroyWindow(self.winName)
def show_img(img, boxes=None, window_name="Happy Dance Image", msec_to_show_for=1500, save=False, filepath='None'):
    """Show an image, potentially with surrounding bounding boxes

    Args:
    ----
        img: np.ndarray
        boxes (optional): dct of bounding boxes where the keys hold the name (actual or
            predicted) and the values the coordinates of the boxes
        window_name (optional): str
        msec_to_show_for (optional): int
    """
    img_copy = img.copy()  # Any drawing is inplace. Draw on copy to protect original.
    if boxes:
        color_dct = {'actual': (125, 255, 0), 'predicted': (0, 25, 255)}
        for box_type, box_coords in boxes.items():
            cv2.rectangle(img_copy, pt1=(box_coords[0], box_coords[1]),
                          pt2=(box_coords[2], box_coords[3]),
                          color=color_dct[box_type], thickness=2)

    if not save:
        cv2.imshow(window_name, img_copy)
        cv2.waitKey(msec_to_show_for)
        cv2.destroyWindow(window_name)
    else:
        cv2.imwrite(filepath, img_copy)
def capture_frame():
    # Open the "default" camera
    vc = cv2.VideoCapture(0)

    # Check if we succeeded in opening camera feed
    if vc.isOpened():
        rval, frame = vc.read()
    else:
        rval = False

    # Display captured frames in a new window
    cv2.namedWindow("Camera Video Feed")

    while rval:
        cv2.imshow("Camera Video Feed", frame)
        # cv2.imshow("Camera Video Feed", result)
        rval, frame = vc.read()
        key = cv2.waitKey(20)
        if key == 27:  # User pressed ESC key
            break
        elif key == ord('s'):
            break

    # Destroy window
    cv2.destroyWindow("Camera Video Feed")

    # Close VideoCapture feed -- Important!
    vc.release()

    # Save the frame
    cv2.imwrite('../images/captured_frame.png', frame)

    return frame
def track_obj(low_hsv, high_hsv):
    # Open the "default" camera
    vc = cv2.VideoCapture(0)

    # Check if we succeeded in opening camera feed
    if vc.isOpened():
        rval, frame = vc.read()
    else:
        rval = False

    # Display captured frames in a new window
    cv2.namedWindow("Camera Video Feed")

    # Display filtered object frame in a new window
    cv2.namedWindow("Tracking")

    result = frame

    while rval:
        cv2.imshow("Camera Video Feed", frame)
        cv2.imshow("Tracking", result)
        rval, frame = vc.read()

        # Convert to HSV space
        frameHSV = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

        # Filter out components with values in selected range
        # Threshold HSV image
        mask = cv2.inRange(frameHSV, low_hsv, high_hsv)
        result = cv2.bitwise_and(frame, frame, mask=mask)

        # Wait for ESC key press
        key = cv2.waitKey(20)
        if key == 27:  # User pressed ESC key
            break

    # Destroy window
    cv2.destroyWindow("Camera Video Feed")

    # Close VideoCapture feed -- Important!
    vc.release()
def klick_landmarks_on_image():
    global current_landmark, klicked_landmarks
    cv2.namedWindow("image")
    cv2.setMouseCallback("image", click)
    show_lms_on_image()

    image = cv2.imread('/user/HS204/m09113/Downloads/face_synthesis/M1000_22_L0_V9R_N_small.JPG')

    for lm_idx in range(68):
        while True:
            temp_image = image.copy()
            lms_to_be_shown = klicked_landmarks  #+current_landmark
            if len(current_landmark) > 0:
                lms_to_be_shown = klicked_landmarks + [current_landmark]
            if len(lms_to_be_shown) > 0:
                draw_lms_on_image(temp_image, lms_to_be_shown)
            cv2.imshow("image", temp_image)
            key = cv2.waitKey(1) & 0xFF
            if key == ord(" "):
                if len(current_landmark) > 0:
                    klicked_landmarks.append(current_landmark)
                    break
            if key == ord("q"):
                return 0
        current_landmark = []

    cv2.destroyWindow("image")

    # now write lm file
    landmark_file = '/user/HS204/m09113/Downloads/face_synthesis/M1000_22_L0_V9R_N_small.pts'
    with open(landmark_file, "w") as lf:
        lf.write('version: 1\n')
        lf.write('n_points: 68\n')
        lf.write('{\n')
        for landmark in klicked_landmarks:
            lf.write(str(landmark[0]) + " " + str(landmark[1]) + "\n")
        lf.write('}\n')
def main():
    all_bb = []
    cv2.namedWindow("image")
    cv2.setMouseCallback("image", click)

    images = glob.glob('/user/HS204/m09113/facer2vm_project_area/data/300VW_Dataset_2015_12_14/*/frames/000001.png')
    output_file_path = '/user/HS204/m09113/facer2vm_project_area/data/300VW_Dataset_2015_12_14/bb_clicked_philipp.log'

    for i, image_path in enumerate(images):
        print('image', image_path, '(', i, 'of', len(images), ')')
        image = cv2.imread(image_path)
        upper_left_point, lower_right_point = click_bb_on_image(image)
        all_bb.append([upper_left_point[0], upper_left_point[1], lower_right_point[0], lower_right_point[1]])
        #print (upper_left_point, lower_right_point)
        open(output_file_path, 'a').write(str(image_path)+' '+str(upper_left_point[0])+' '+str(upper_left_point[1])+' '+str(lower_right_point[0])+' '+str(lower_right_point[1])+'\n')

    cv2.destroyWindow("image")

    # now write lm file
    # landmark_file = '/user/HS204/m09113/Downloads/face_synthesis/M1000_22_L0_V9R_N_small.pts'
    # with open(landmark_file, "w") as lf:
    #     lf.write('version: 1\n')
    #     lf.write('n_points: 68\n')
    #     lf.write('{\n')
    #     for landmark in klicked_landmarks:
    #         lf.write(str(landmark[0])+" "+str(landmark[1])+"\n")
    #     lf.write('}\n')

# return x, y, w, h
def destroy_window(self):
    cv2.destroyWindow(self._window_name)
    self._isWindowCreated = False
def _close(self):
    if self.verbose:
        print('Closing window')
        print('\n--------------------------------------')
        print('Colorspace:', self.cspace)
        if self.cspace == 'Grayscale':
            print('Lower bound:', self._lowerb[0])
            print('Upper bound:', self._upperb[0])
        else:
            print('Lower bounds:', self._lowerb)
            print('Upper bounds:', self._upperb)
        print('--------------------------------------\n')
    cv2.destroyWindow(self.name)
def main():
    window = 'preview'
    cv2.namedWindow(window)

    tfrecord_file_names = glob(path.join('data', '*.tfrecord.gz'))
    max_reads = 50
    batch_size = 50

    with tf.Graph().as_default() as graph:
        image_batch, type_batch = import_images(tfrecord_file_names, max_reads=max_reads, batch_size=batch_size)

        coord = tf.train.Coordinator()
        with tf.Session(graph=graph) as sess:
            init = tf.group(tf.local_variables_initializer(), tf.global_variables_initializer())
            sess.run(init)

            threads = tf.train.start_queue_runners(sess=sess, coord=coord)
            try:
                while not coord.should_stop():
                    Xs = sess.run(image_batch)
                    for img in Xs:
                        cv2.imshow(window, img)
                        if (cv2.waitKey(33) & 0xff) == 27:
                            coord.request_stop()
                            break
            except tf.errors.OutOfRangeError:
                print('Read all examples.')
            finally:
                coord.request_stop()
                coord.join(threads)

    cv2.destroyWindow(window)
def extract_video_frames(queue: PriorityQueue, source: int, cap: cv2.VideoCapture,
                         crop: Tuple[int, int, int, int],
                         target_width: int, target_height: int,
                         frame_step: int=1, display_progress: bool=False):
    window = 'video'
    if display_progress:
        cv2.namedWindow(window)

    while True:
        success, buffer = cap.read()
        if not success:
            break

        # crop borders
        buffer = buffer[crop[0]:-crop[2], crop[1]:-crop[3], :]
        buffer = cv2.resize(buffer, (target_width, target_height), interpolation=cv2.INTER_AREA)

        frame = cap.get(cv2.CAP_PROP_POS_FRAMES)
        random_priority = random()
        queue.put((random_priority, (buffer, source)))

        if display_progress:
            cv2.imshow(window, buffer)
            if (cv2.waitKey(33) & 0xff) == 27:
                break

        cap.set(cv2.CAP_PROP_POS_FRAMES, frame + frame_step)

    if display_progress:
        cv2.destroyWindow(window)
def run(self):
    video_capture = cv2.VideoCapture(0)
    while True:
        got_a_frame, image = video_capture.read()
        grayimage = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        cv2.imshow('camera', grayimage)
        key = cv2.waitKey(50)
        if key == 27:
            break
    cv2.destroyWindow('camera')

# Create new threads
def imshow(im, window_name='default'):
    cv2.imshow(window_name, im)
    key = cv2.waitKey(0)
    # print(key)
    cv2.destroyWindow(window_name)
    if key == 27:
        raise Exception('Esc pressed!')
    return
def review(self, TD_object):
    """Displays the TD recording overlaid with the annotated track.
    On events are red, and off events are blue.
    Takes in:
        TD_object: An Events object (see eventvision module).
    """
    cv2.namedWindow('review_frame')
    for i in range(1, len(self.data.ts)):
        current_frame = np.zeros((TD_object.height, TD_object.width, 3), np.uint8)
        tmin = self.data.ts[i-1]
        tmax = self.data.ts[i]
        tminind = np.min(np.where(TD_object.data.ts >= tmin))
        tmaxind = np.max(np.where(TD_object.data.ts <= tmax))

        # Populate the current frame with all the events which occur between successive timestamps of the
        # annotated track events. Track event which was saved at the end of the current frame is shown.
        current_frame[TD_object.data.y[tminind:tmaxind][TD_object.data.p[tminind:tmaxind] == 1],
                      TD_object.data.x[tminind:tmaxind][TD_object.data.p[tminind:tmaxind] == 1], :] = [100, 100, 255]
        current_frame[TD_object.data.y[tminind:tmaxind][TD_object.data.p[tminind:tmaxind] == 0],
                      TD_object.data.x[tminind:tmaxind][TD_object.data.p[tminind:tmaxind] == 0], :] = [255, 255, 30]
        cv2.circle(current_frame, (self.data.x[i], self.data.y[i]), 10, (0, 255, 0), 2)
        cv2.imshow('review_frame', current_frame)
        key = cv2.waitKey(1)
    cv2.destroyWindow('review_frame')
def trackObjects(self):
    for area in self.trackedAreasList:
        # Template matching
        gray = cv2.cvtColor(self.processedFrame, cv2.COLOR_BGR2GRAY)
        templ = area.getGrayStackAve()
        cc = cv2.matchTemplate(gray, templ, cv2.TM_CCOEFF_NORMED)
        cc = cc * cc * cc * cc
        _, cc = cv2.threshold(cc, 0.1, 0, cv2.THRESH_TOZERO)
        cc8 = cv2.normalize(cc, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U)
        mask = np.zeros_like(cc8)

        # Search match within template region
        mcorn = area.getEnlargedCorners(0)  # If not 0, enlarge the search
        cv2.rectangle(mask, mcorn[0], mcorn[1], 255, -1)
        _, _, _, mx = cv2.minMaxLoc(cc8, mask)

        # kp = area.getKalmanPredict()
        # area.updateWindow(kp)
        # area.setTemplate(self.processedFrame)

        # Prevent large spatial jumps
        (c, r, _, _) = area.getcrwh()
        jump = 10
        if abs(c - mx[0]) < jump and abs(r - mx[1]) < jump:
            # area.setKalmanCorrect(mx)
            area.updateWindow(mx)
        else:
            # area.setKalmanCorrect((c, r))
            area.updateWindow((c, r))
        area.setTemplate(self.processedFrame)

        # Show the template stack
        if self.showTemplate is True:
            cv2.imshow('Stack: '+str(area), area.getStack())
        else:
            try:
                cv2.destroyWindow('Stack: '+str(area))
            except:
                pass

        # Show the matching results
        if self.showMatch is True:
            cv2.rectangle(cc8, mcorn[0], mcorn[1], 255, 1)
            cv2.circle(cc8, mx, 5, 255, 1)
            cv2.imshow('Match: '+str(area), cc8)
        else:
            try:
                cv2.destroyWindow('Match: '+str(area))
            except:
                pass

        # Draw the tracked area on the image
        corn = area.getCorners()
        cv2.rectangle(self.workingFrame, corn[0], corn[1], (0, 255, 0), 1)

        # self.showFrame()
        # raw_input('wait')
def run(self, update_fun=None):
    """Start the image viewer. This method blocks until the user requests to
    close the window.

    Parameters
    ----------
    update_fun : Optional[Callable[] -> None]
        An optional callable that is invoked at each frame. May be used
        to play an animation/a video sequence.

    """
    if update_fun is not None:
        self._user_fun = update_fun

    self._terminate, is_paused = False, False
    # print("ImageViewer is paused, press space to start.")
    while not self._terminate:
        t0 = time.time()
        if not is_paused:
            self._terminate = not self._user_fun()
            if self._video_writer is not None:
                self._video_writer.write(
                    cv2.resize(self.image, self._window_shape))
        t1 = time.time()
        remaining_time = max(1, int(self._update_ms - 1e3*(t1-t0)))
        cv2.imshow(
            self._caption, cv2.resize(self.image, self._window_shape[:2]))
        key = cv2.waitKey(remaining_time)
        if key & 255 == 27:  # ESC
            print("terminating")
            self._terminate = True
        elif key & 255 == 32:  # ' '
            print("toggeling pause: " + str(not is_paused))
            is_paused = not is_paused
        elif key & 255 == 115:  # 's'
            print("stepping")
            self._terminate = not self._user_fun()
            is_paused = True

    # Due to a bug in OpenCV we must call imshow after destroying the
    # window. This will make the window appear again as soon as waitKey
    # is called.
    #
    # see https://github.com/Itseez/opencv/issues/4535
    self.image[:] = 0
    cv2.destroyWindow(self._caption)
    cv2.waitKey(1)
    cv2.imshow(self._caption, self.image)
def show_tool_01(real, pred, show_shape):
    """
    """
    real_shape = real.shape
    pred_shape = pred.shape
    _real = imgs_display(real, show_shape)
    _pred = gray2rgb(imgs_display(pred, show_shape))
    select_imgs = []

    def tool(event, x, y, flags, param):
        inx = int(x/real_shape[1])
        iny = int(y/real_shape[2])

        def find():
            px = inx*pred_shape[1]
            py = iny*pred_shape[2]
            p = np.copy(_pred)
            # cv2.putText(p, "x:%s y:%s px:%s py:%s"%(x,y,px,py), (px,py), cv2.FONT_HERSHEY_SIMPLEX, 0.2, (255, 255, 255), 1)
            cv2.rectangle(p, (px, py), (px+pred_shape[1], py+pred_shape[2]), (0, 255, 0), 2)
            cv2.imshow("PRED", p)

        if event == cv2.EVENT_LBUTTONDOWN:
            select_imgs = param[0]
            select_imgs.append(pred[inx + iny*show_shape[1]])
            select_imgs = select_imgs[-10*10:]
            cv2.imshow("SELECT", imgs_display(np.array(select_imgs), [10]*2))
        elif event == cv2.EVENT_MOUSEMOVE:
            find()
        elif event == cv2.EVENT_LBUTTONUP:
            # cv2.destroyWindow(win_name)
            pass

    cv2.namedWindow("REAL")
    cv2.setMouseCallback("REAL", tool, [select_imgs])
    cv2.imshow("PRED", _pred)

    # keep looping until the 'q' key is pressed
    while True:
        # display the image and wait for a keypress
        cv2.imshow("REAL", _real)
        key = cv2.waitKey(1) & 0xFF
        if key == ord('q'):
            break
    cv2.destroyAllWindows()
def main():
    window = 'preview'
    cv2.namedWindow(window)

    tfrecord_file_names = glob(path.join('data', '*-2.tfrecord.gz'))
    max_reads = 200
    batch_size = 50

    with tf.Graph().as_default() as graph:
        image_batch, type_batch = import_images(tfrecord_file_names, max_reads=max_reads, batch_size=batch_size)
        import_graph('exported/vae-refine.pb', input_map={'image_batch': image_batch}, prefix='process')
        phase_train = graph.get_tensor_by_name('process/mogrify/vae/phase_train:0')
        embedding = graph.get_tensor_by_name('process/mogrify/vae/variational/add:0')
        reconstructed = graph.get_tensor_by_name('process/mogrify/clip:0')
        reconstructed.set_shape((None, 180, 320, 3))
        refined = graph.get_tensor_by_name('process/refine/y:0')
        refined.set_shape((None, 180, 320, 3))

        coord = tf.train.Coordinator()
        with tf.Session(graph=graph) as sess:
            init = tf.group(tf.local_variables_initializer(), tf.global_variables_initializer())
            sess.run(init)

            threads = tf.train.start_queue_runners(sess=sess, coord=coord)
            try:
                print('Evaluating ...')
                while not coord.should_stop():
                    # fetching the embeddings given the inputs ...
                    reference, coeffs = sess.run([image_batch, embedding],
                                                 feed_dict={phase_train: False})

                    # ... then salting the embeddings ...
                    coeffs += np.random.randn(coeffs.shape[0], coeffs.shape[1])

                    # ... then fetching the images given the new embeddings.
                    results = sess.run(refined, feed_dict={phase_train: False, embedding: coeffs})
                    assert reference.shape == results.shape

                    reference = reference[:3]
                    results = results[:3]

                    canvas = example_gallery(reference, results)
                    cv2.imshow(window, canvas)

                    if (cv2.waitKey(1000) & 0xff) == 27:
                        print('User requested cancellation.')
                        coord.request_stop()
                        break
            except tf.errors.OutOfRangeError:
                print('Read all examples.')
            finally:
                coord.request_stop()
                coord.join(threads)
                coord.wait_for_stop()

    cv2.destroyWindow(window)