The following code examples, extracted from open-source Python projects, illustrate how to use cv2.destroyAllWindows().
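Before the individual examples, here is a minimal sketch of the pattern they all share: cv2.imshow() opens a HighGUI window, cv2.waitKey() pumps the GUI event loop, and cv2.destroyAllWindows() closes every window the process opened. The window title and file name below are placeholders, not taken from any of the projects:

import cv2

img = cv2.imread('example.jpg')  # placeholder file name
cv2.imshow('preview', img)       # open a named window
cv2.waitKey(0)                   # block until any key is pressed
cv2.destroyAllWindows()          # tear down every open window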
def get_points():
    # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
    objp = np.zeros((6*8, 3), np.float32)
    objp[:, :2] = np.mgrid[0:8, 0:6].T.reshape(-1, 2)

    # Arrays to store object points and image points from all the images.
    objpoints = []  # 3d points in real world space
    imgpoints = []  # 2d points in image plane.

    # Make a list of calibration images
    images = glob.glob('calibration_wide/GO*.jpg')

    # Step through the list and search for chessboard corners
    for idx, fname in enumerate(images):
        img = cv2.imread(fname)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        # Find the chessboard corners
        ret, corners = cv2.findChessboardCorners(gray, (8, 6), None)

        # If found, add object points, image points
        if ret == True:
            objpoints.append(objp)
            imgpoints.append(corners)

            # Draw and display the corners
            cv2.drawChessboardCorners(img, (8, 6), corners, ret)
            #write_name = 'corners_found'+str(idx)+'.jpg'
            #cv2.imwrite(write_name, img)
            cv2.imshow('img', img)
            cv2.waitKey(500)

    cv2.destroyAllWindows()
    return objpoints, imgpoints
def CaptureImage():
    imageName = 'DontCare.jpg'  # Just a random string
    cap = cv2.VideoCapture(0)
    while(True):
        # Capture frame-by-frame
        ret, frame = cap.read()
        #gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # For capturing the image in monochrome
        rgbImage = frame  # For capturing the image in RGB color space

        # Display the resulting frame
        cv2.imshow('Webcam', rgbImage)

        # Wait for the 'q' key to capture the image
        if cv2.waitKey(1) & 0xFF == ord('q'):
            # Set the image name to the date it was captured
            imageName = str(time.strftime("%Y_%m_%d_%H_%M")) + '.jpg'
            # Save the image
            cv2.imwrite(imageName, rgbImage)
            break

    # When everything is done, release the capture
    cap.release()
    cv2.destroyAllWindows()
    # Return the captured image's name
    return imageName
def do_key_press(symbol, modifiers):
    global cur_vector
    print("SO: {}".format(symbol))
    if symbol == key.R:
        if theApp.use_camera:
            theApp.set_camera_recording(not theApp.camera_recording)
    if symbol == key.T:
        theApp.show_camera = not theApp.show_camera
    elif symbol == key.SPACE:
        print("SPACEBAR")
        snapshot(None)
    elif symbol == key.ESCAPE:
        print("ESCAPE")
        cv2.destroyAllWindows()
        if theApp.use_camera:
            cv2.VideoCapture(0).release()
        sys.exit(0)
def videoize(func, args, src=0, win_name="Cam", delim_wait=1, delim_key=27):
    cap = cv2.VideoCapture(src)
    while(1):
        ret, frame = cap.read()
        # To speed up processing; almost real-time on my PC
        frame = cv2.resize(frame, dsize=None, fx=0.5, fy=0.5)
        frame = cv2.flip(frame, 1)
        out = func(frame, args)
        if out is None:
            continue
        out = cv2.resize(out, dsize=None, fx=1.4, fy=1.4)
        cv2.imshow(win_name, out)
        # moveWindow expects integer coordinates (s_w/s_h are the screen size)
        cv2.moveWindow(win_name, (s_w - out.shape[1]) // 2, (s_h - out.shape[0]) // 2)
        k = cv2.waitKey(delim_wait)
        if k == delim_key:
            cv2.destroyAllWindows()
            cap.release()
            return
def cvCaptureVideo():
    capture = cv2.VideoCapture(0)
    if capture.isOpened() is False:
        raise IOError("IO Error")  # raise a proper exception, not a string
    cv2.namedWindow("Capture", cv2.WINDOW_NORMAL)
    while True:
        ret, image = capture.read()
        if ret == False:
            continue
        cv2.imshow("Capture", image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    capture.release()
    cv2.destroyAllWindows()
def MoG2(vid, min_thresh=800, max_thresh=10000):
    '''
    Args    : Video object and threshold parameters
    Returns : None
    '''
    cap = cv2.VideoCapture(vid)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    fgbg = cv2.createBackgroundSubtractorMOG2()
    connectivity = 4
    while(cap.isOpened()):
        ret, frame = cap.read()
        if not ret:
            break
        fgmask = fgbg.apply(frame)
        fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)
        output = cv2.connectedComponentsWithStats(
            fgmask, connectivity, cv2.CV_32S)
        for i in range(output[0]):
            # output[2][i][4] is the blob area; keep blobs within the thresholds
            if output[2][i][4] >= min_thresh and output[2][i][4] <= max_thresh:
                cv2.rectangle(frame, (output[2][i][0], output[2][i][1]), (
                    output[2][i][0] + output[2][i][2],
                    output[2][i][1] + output[2][i][3]), (0, 255, 0), 2)
        cv2.imshow('detection', frame)
        # Without a waitKey call the window never refreshes; Esc exits early
        if cv2.waitKey(30) & 0xFF == 27:
            break
    cap.release()
    cv2.destroyAllWindows()
def get_fps(source, Videolength):
    cap = cv2.VideoCapture(source)
    frame_counter = 0
    print("Calculating Frames per second . . . ")
    while True:
        # Capture frame-by-frame
        ret, frame = cap.read()
        if not ret:
            break
        frame_counter += 1
    cap.release()
    cv2.destroyAllWindows()
    # Divide as floats, otherwise integer division truncates the rate
    fps = frame_counter / float(Videolength)
    print("\nFPS is " + str(fps) + "\n")
    return fps

# Algorithm to check intersection of line segments
# It checks iteratively for intersection between a pair of points (last location
# of the vehicle) and pairs of points of another list (pedestrian path)
def get_fps(source, Videolength):
    cap = cv2.VideoCapture("docs/video/traffic2")
    frame_counter = 0
    print("Calculating Frames per second . . . ")
    while True:
        # Capture frame-by-frame
        ret, frame = cap.read()
        if not ret:
            break
        frame_counter += 1
    cap.release()
    cv2.destroyAllWindows()
    # Divide as floats, otherwise integer division truncates the rate
    fps = frame_counter / float(Videolength)
    print("\nFPS is " + str(fps) + "\n")
    return fps

# Algorithm to check intersection of line segments
# It checks iteratively for intersection between a pair of points (last location
# of the vehicle) and pairs of points of another list (pedestrian path)
def main():
    NetworkTable.setIPAddress('10.19.37.2')
    NetworkTable.setClientMode()
    NetworkTable.initialize()
    sd = NetworkTable.getTable('SmartDashboard')

    #ms_list = []
    while True:
        time.sleep(0.1)
        start_time = datetime.now()
        # returns the elapsed milliseconds since the start of the program
        vision(sd)
        dt = datetime.now() - start_time
        ms = (dt.days * 24 * 60 * 60 + dt.seconds) * 1000 + dt.microseconds / 1000.0
        #ms_list.append(ms)
        print(ms)
        #print(np.mean(ms_list))
    cv2.destroyAllWindows()
def main():
    NetworkTable.setIPAddress('10.19.37.2')
    NetworkTable.setClientMode()
    NetworkTable.initialize()
    sd = NetworkTable.getTable('SmartDashboard')

    #ms_list = []
    while True:
        time.sleep(0.1)
        start_time = datetime.now()
        # returns the elapsed milliseconds since the start of the program
        vision(sd)
        dt = datetime.now() - start_time
        ms = (dt.days * 24 * 60 * 60 + dt.seconds) * 1000 + dt.microseconds / 1000.0
        print(ms)
    cv2.destroyAllWindows()
def color_quant(input, K, output):
    img = cv2.imread(input)
    Z = img.reshape((-1, 3))
    # convert to np.float32
    Z = np.float32(Z)
    # define criteria, number of clusters (K) and apply kmeans()
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 15, 1.0)
    ret, label, center = cv2.kmeans(Z, K, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
    # Now convert back into uint8, and make original image
    center = np.uint8(center)
    res = center[label.flatten()]
    res2 = res.reshape((img.shape))
    cv2.imshow('res2', res2)
    cv2.waitKey(0)
    cv2.imwrite(output, res2)
    cv2.destroyAllWindows()
def hdSolidBlock(fn="redHDSolidBlock.jpg", bgr=None):
    '''Generate test images as solid blocks of colour of known size, save to filename fn.'''
    # Create a zero (black) image of HD size with 3 colour dimensions. Colour space assumed BGR by default.
    h = 1080
    w = 1920
    img = np.zeros((h, w, 3), dtype="uint8")
    # Want to set all of the pixels to the bgr tuple, default red, 8 bit colour
    if not bgr:
        bgr = [0, 0, 255]
    img[:, :] = bgr
    vw = ImageViewer(img)
    vw.windowShow()
    #cv2.imshow("zeroes", frame)
    #ch = 0xff & cv2.waitKey(10000)
    #cv2.destroyAllWindows()
    cv2.imwrite(fn, img)
def show_cut_img(img_name):
    img = cv2.imread(img_name, 0)
    cut_img = cut(img)
    cv2.imshow('cut image', cut_img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    return cut_img
def show(im, allobj, S, w, h, cellx, celly):
    for obj in allobj:
        a = obj[5] % S
        b = obj[5] // S
        cx = a + obj[1]
        cy = b + obj[2]
        centerx = cx * cellx
        centery = cy * celly
        ww = obj[3]**2 * w
        hh = obj[4]**2 * h
        cv2.rectangle(im,
                      (int(centerx - ww/2), int(centery - hh/2)),
                      (int(centerx + ww/2), int(centery + hh/2)),
                      (0, 0, 255), 2)
    cv2.imshow("result", im)
    cv2.waitKey()
    cv2.destroyAllWindows()
def save_images(self, dirname='dump'):
    import os
    img_no = 1

    # Makes the directory if it does not exist
    if not os.path.exists('./' + dirname):
        os.mkdir(dirname)

    while True:
        self.grab_frame()
        if self.debug:
            cv2.imshow('frame', self.img)
        k = cv2.waitKey(1) & 0xFF
        if k == ord('s'):
            cv2.imwrite(os.path.join(dirname, 'dump_' + str(img_no) + '.jpg'), self.img)
            img_no += 1
        elif k == ord('q'):
            break

    cv2.destroyAllWindows()

# Destructor
def process_video(path_to_video):
    cap = cv2.VideoCapture(path_to_video)  # Load video
    while True:
        ret, frame = cap.read()
        print(frame)
        if ret is False or (cv2.waitKey(30) & 0xff) == 27:
            break  # Exit if the video ended

        mask = np.zeros_like(frame)  # init mask
        contours = find_contours(frame)
        plates, plates_images, mask = find_plate_numbers(frame, contours, mask)
        print("Plate Numbers: %s" % ", ".join(plates))

        processed_frame = cv2.add(frame, mask)  # Apply the mask to image
        cv2.imshow('frame', processed_frame)

    cv2.destroyAllWindows()
    cap.release()

###########################################
# Run The Program #########################
###########################################
def show_img():
    global face_rect
    # Continuously grab frames and run face detection (old cv API mixed with cv2)
    while True:
        img = cv.QueryFrame(cam)  # grab a frame from the camera
        # copy the frame into a resizable colour image
        src = cv.CreateImage((img.width, img.height), 8, 3)
        cv.Resize(img, src, cv.CV_INTER_LINEAR)
        # convert to grayscale
        gray = cv.CreateImage((img.width, img.height), 8, 1)
        cv.CvtColor(img, gray, cv.CV_BGR2GRAY)
        cv.EqualizeHist(gray, gray)  # equalize the histogram to improve detection
        rects = detect(gray, cascade)  # detect faces in the grayscale frame
        face_rect = rects  # remember the detected face rectangles
        draw_rects(src, rects, (0, 255, 0))  # draw the detections
        cv.ShowImage('DeepFace Wang_jun_qian', src)
        if cv2.waitKey(5) == 27:  # Esc exits; the original compared without acting on the result
            break
    cv2.destroyAllWindows()
def get_frames_every_x_sec(video, secs=1, fmt='opencv'):
    vidcap = cv2.VideoCapture(video)
    fps = get_frame_rate(vidcap)
    inc = int(fps * secs)
    length = int(vidcap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
    count = 0
    while vidcap.isOpened() and count <= length:
        if count % inc == 0:
            success, image = vidcap.read()
            if success:
                cv2_im = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
                if fmt == 'PIL':
                    im = Image.fromarray(cv2_im)
                #elif fmt == 'DISK':
                #    cv2.imwrite(os.path.join(path_output_dir, '%d.png') % count, image)
                else:
                    im = cv2_im
                yield count, im
            else:
                break
        count += 1
    cv2.destroyAllWindows()
    vidcap.release()

# image region: img = img[c1:c1+25, r1:r1+25]
# roi = gray[y1:y2, x1:x2]
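Note that get_frames_every_x_sec above is a generator, so frames are produced lazily while the caller iterates. A minimal usage sketch (the video path is a placeholder; the yielded frames are RGB by default, so they are converted back to BGR before saving):

for frame_no, im in get_frames_every_x_sec('input.mp4', secs=2):
    # convert the RGB frame back to OpenCV's BGR order before writing
    cv2.imwrite('frame_%06d.png' % frame_no, cv2.cvtColor(im, cv2.COLOR_RGB2BGR))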
def pick_corrs(images, n_pts_to_pick=4):
    data = [[[], 0, False, False, False, image, "Image %d" % i, n_pts_to_pick]
            for i, image in enumerate(images)]

    for d in data:
        win_name = d[6]
        cv2.namedWindow(win_name)
        cv2.setMouseCallback(win_name, corr_picker_callback, d)
        cv2.startWindowThread()
        cv2.imshow(win_name, d[5])

    key = None
    while key != '\n' and key != '\r' and key != 'q':
        key = cv2.waitKey(33)
        key = chr(key & 255) if key >= 0 else None

    cv2.destroyAllWindows()

    if key == 'q':
        return None
    else:
        return [d[0] for d in data]
def main(args):
    saveFace = None
    cap = cv2.VideoCapture(0)
    face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_alt.xml')
    while(True):
        # Capture frame-by-frame
        ret, frame = cap.read()
        faces = face_cascade.detectMultiScale(frame, 1.3, 5)
        if len(faces) > 0:
            saveFace = frame
            break
        # Display the resulting frame
        cv2.imshow('frame', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    # When everything done, release the capture
    cap.release()
    cv2.destroyAllWindows()
    cv2.imwrite('C:/Users/USER/Desktop/facenet-RealTime/src/face_data/saveFace.jpg', frame)

    mypath = 'C:/Users/USER/Desktop/facenet-RealTime/src/face_data'
    onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]
    myImage = []
    for file in onlyfiles:
        isImage = None
        file = mypath + '/' + file
        isImage = imghdr.what(file)
        if isImage != None:
            myImage.append(file)

    # begin facenet
    cp.main(args, myImage)
def evaluate(img_col, args):
    numpy.seterr(all='ignore')
    assert isinstance(img_col, numpy.ndarray), 'img_col must be a numpy array'
    assert img_col.ndim == 3, 'img_col must be a color image ({0} dimensions currently)'.format(img_col.ndim)
    assert isinstance(args, argparse.Namespace), 'args must be of type argparse.Namespace not {0}'.format(type(args))

    img_gry = cv2.cvtColor(img_col, cv2.COLOR_RGB2GRAY)
    rows, cols = img_gry.shape
    crow, ccol = rows // 2, cols // 2  # integer centre, needed for the slice below
    f = numpy.fft.fft2(img_gry)
    fshift = numpy.fft.fftshift(f)
    fshift[crow-75:crow+75, ccol-75:ccol+75] = 0
    f_ishift = numpy.fft.ifftshift(fshift)
    img_fft = numpy.fft.ifft2(f_ishift)
    img_fft = 20 * numpy.log(numpy.abs(img_fft))

    if args.display and not args.testing:
        cv2.destroyAllWindows()
        scripts.display('img_fft', img_fft)
        scripts.display('img_col', img_col)
        cv2.waitKey(0)

    result = numpy.mean(img_fft)
    return img_fft, result, result < args.thresh
def end_game(self):
    """ When everything is done, release the capture. """
    if not self.piCam:
        self.cam.release()
        quit_coord = (self.screenwidth // 4, self.screenheight // 3)
        try:
            draw_text(quit_coord, self.photo, "Press any key to quit_", font_scale=1)
        except AttributeError:
            cv2.destroyAllWindows()
        # self.presentation(frame)
        # self.photo = self.overlayUI(self.photo)
    else:
        self.piCamera.close()
    cv2.imshow("PartyPi", self.photo)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def read():
    db = shelve.open(filename)
    imgs = db['imgs']
    data = db['data']
    for i in range(len(imgs)):
        d = data[i]
        print(i, d)
        img = imgs[i]
        img = np.fromstring(img, np.uint8)
        frame = cv2.imdecode(img, 1)
        print('frame[{}] {}'.format(i, frame.shape))
        cv2.imshow('camera', frame)
        cv2.waitKey(300)
    print('bye ...')
    cv2.destroyAllWindows()
    db.close()
def record(cam, runtime, mat):
    vid = tp.PyERROR_CODE.PyERROR_CODE_FAILURE
    out = False
    while vid != tp.PyERROR_CODE.PySUCCESS and not out:
        filepath = input("Enter filepath name: ")
        vid = cam.enable_recording(filepath)
        print(repr(vid))
        if vid == tp.PyERROR_CODE.PySUCCESS:
            print("Recording started...")
            out = True
            print("Hit spacebar to stop recording: ")
            key = False
            while key != 32:  # for spacebar
                err = cam.grab(runtime)
                if err == tp.PyERROR_CODE.PySUCCESS:
                    cam.retrieve_image(mat)
                    cv2.imshow("ZED", mat.get_data())
                    key = cv2.waitKey(5)
                    cam.record()
        else:
            print("Help: you must enter the filepath + filename + SVO extension.")
            print("Recording not started.")
    cam.disable_recording()
    print("Recording finished.")
    cv2.destroyAllWindows()
def debug_face_classifier(file):
    face_cascade = cv2.CascadeClassifier(xml_face_classifier)
    image = cv2.imread(file)
    image = imutils.resize(image, width=500)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(image, 1.07, 3)
    print(faces)
    for (x, y, w, h) in faces:
        cv2.rectangle(image, (x, y), (x+w, y+h), (255, 0, 0), 2)
        #roi_gray = gray[y:y+h, x:x+w]
        #roi_color = image[y:y+h, x:x+w]

    cv2.imshow('Image', image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def image(self):
    img = cv2.imread(self.image_path)
    img = imutils.resize(img, width=min(800, img.shape[1]))
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)
    fullbody = self.HogDescriptor(gray)
    for (x, y, w, h) in fullbody:
        cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 2)
    faces = self.haar_facedetection(gray)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = img[y:y+h, x:x+w]
        eyes = self.haar_eyedetection(roi_gray)
        for (ex, ey, ew, eh) in eyes:
            cv2.rectangle(roi_color, (ex, ey), (ex+ew, ey+eh), (0, 255, 0), 2)
        smile = self.haar_smilecascade(roi_gray)
        for (sx, sy, sw, sh) in smile:
            cv2.rectangle(roi_color, (sx, sy), (sx+sw, sy+sh), (0, 255, 0), 2)
    img = self.dlib_function(img)
    cv2.imshow('img', img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def start_video(self, model):
    camera = cv2.VideoCapture(0)
    while True:
        frame = camera.read()[1]
        if frame is None:
            continue
        image_array = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        image_array = cv2.resize(image_array, (300, 300))
        image_array = substract_mean(image_array)
        image_array = np.expand_dims(image_array, 0)
        predictions = model.predict(image_array)
        detections = detect(predictions, self.prior_boxes)
        plot_detections(detections, frame, 0.6,
                        self.arg_to_class, self.colors)
        cv2.imshow('webcam', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    camera.release()
    cv2.destroyAllWindows()
def test(path):
    cap = cv2.VideoCapture(path)  # was hard-coded to the undefined path_video; use the parameter
    testing = []
    while(True):
        ret, frame = cap.read()
        if not ret:
            break  # stop at the end of the video instead of crashing on an empty frame
        res = cv2.resize(frame, (250, 250))
        gray_image = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
        xarr = np.squeeze(np.array(gray_image).astype(np.float32))
        m, v = cv2.PCACompute(xarr)
        arr = np.array(v)
        flat_arr = arr.ravel()
        testing.append(flat_arr)
        #cv2.imshow('frame', frame)
        #if cv2.waitKey(1) & 0xFF == ord("q"):
        #    break
    #cap.release()
    #cv2.destroyAllWindows()
    logos = svm.predict(testing)
    uniqlogos = list(set(logos))
    for i in uniqlogos:
        print(i)
def cluster(frame_matrix):
    new_frame_matrix = []
    i = 0
    for frame in frame_matrix:
        print("reading frame {}".format(i))
        i += 1
        Z = frame.reshape((-1, 1))
        Z = np.float32(Z)
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
        K = 2
        # bestLabels=None is required by the cv2 3.x+ kmeans signature
        ret, label, center = cv2.kmeans(Z, K, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
        center = np.uint8(center)
        res = center[label.flatten()]
        res2 = res.reshape((frame.shape))
        new_frame_matrix.append(res2)
        cv2.imshow('res2', res2)
        cv2.waitKey(1)
    cv2.destroyAllWindows()
def face_train_video(train_path, subject, max_train, stream):
    cap = cv2.VideoCapture(stream)
    ret = True
    ctr = 0
    # minimum 10 frames/images per video
    while(ctr < max_train):  # read till end of frames
        ret, img = cap.read()
        if not ret:
            break
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        cv2.imshow("Recognizing Face", img)
        cv2.waitKey(10)
        cv2.imwrite(join(train_path, subject) + "." + str(ctr) + ".jpg", img)  # writes image to disk
        ctr = ctr + 1
    cap.release()
    cv2.destroyAllWindows()

# predict live feed
def simulate(self):
    """Displays termite trail recorded points at a black arena.

    Args:
        None.
    Returns:
        None.
    """
    self.video_source = video.VideoPlayer(self.params['original_video_path'],
                                          self.params['output_path'],
                                          self.params['arena_size'], [],
                                          True, 'MOG')
    simulation_length = min(len(x.trail) for x in self.termites)
    self.current_step = 0
    while self.current_step < simulation_length:
        self.background = np.zeros((self.params['arena_size'][1],
                                    self.params['arena_size'][0], 3), np.uint8)
        self.draw()
        self.show()
        self.current_step += 1
        self.video_source.next_frame()
    cv2.destroyAllWindows()
def splitimg(im_inp, n_row, n_col):
    # determine size of input image
    h_img, w_img = im_inp.shape[:2]
    # determine size of each cropped image (integer division keeps pixel indices)
    h_row = h_img // n_row
    w_col = w_img // n_col
    # declare fragmented image matrix
    img_frag = np.empty((n_row, n_col, h_row, w_col), dtype=np.uint8)
    # fragment the input image and put it into the matrix
    # (the body originally referred to undefined num_rows/num_cols; use the parameters)
    for i in range(0, n_row):
        h0 = h_row * i
        h1 = h_row * (i + 1)
        for j in range(0, n_col):
            w0 = w_col * j
            w1 = w_col * (j + 1)
            img_frag[i, j] = im_inp[h0:h1, w0:w1]
            # uncomment following lines for debugging to show each fragment
            # cv2.imshow('image1', img_frag[i, j])
            # cv2.waitKey(0)
            # cv2.destroyAllWindows()
    return img_frag
def mask_bg(object_window, img):
    '''This function outputs the surrounding pixels:
    an image of the background with the target object masked out.'''
    global h_img, w_img
    x, y, w, h = object_window
    h_bg = h * 2
    w_bg = 2 * w
    h_ = 0.5 * h
    w_ = 0.5 * w
    x_bg = int(max(x - w_, 0))
    y_bg = int(max(y - h_, 0))
    x_bg1 = int(min(x_bg + w_bg, w_img - 1))
    y_bg1 = int(min(y_bg + h_bg, h_img - 1))
    img[y:y+h, x:x+w] = 0
    #print object_window
    #print x_bg, y_bg, x_bg1, y_bg1, img.shape
    bg_img = img[y_bg:y_bg1, x_bg:x_bg1]
    #cv2.imshow("masked_background", bg_img)
    #cv2.waitKey(0)
    #cv2.destroyAllWindows()
    return bg_img
def visualize_image(image, name="Image", resize=False, save_image=False, path=None):
    """Helper function to visualize and save any image"""
    image = image.reshape([IMAGE_WIDTH, IMAGE_HEIGHT])
    image = image.astype(np.uint8)
    if resize:
        image = cv2.resize(image, (IMAGE_WIDTH * 10, IMAGE_HEIGHT * 10))
    cv2.imshow(name, image)
    if cv2.waitKey(0) & 0xFF == ord('q'):
        cv2.destroyAllWindows()
    if save_image:
        assert path is not None
        cv2.imwrite(path, image)
def image_preview(image):
    cv2.imshow('Image preview', image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def display_solution(square_borders, start_grid, solution, image):
    """ Writes the solution to an image and displays said image.

    Params:
        square_borders -- A list containing the borders of all squares
        start_grid     -- A list containing the sudoku starting values
        solution       -- A list containing the sudoku solution
        image          -- The image to write to
    """
    cur_row = 0
    cur_col = 0
    for i, b in enumerate(square_borders):
        x, y, x2, y2 = b  # Tuple unpacking
        # Calculate bottom-left position for text (integer pixel coordinates)
        text_x, text_y = ((x2 + x) // 2) - 10, ((y2 + y) // 2) + 10
        # Bottom-left corner for text position
        org = (text_x, text_y)
        # Only write text if the position was not set in the start_grid
        if start_grid[cur_row][cur_col] == 0:  # 'is 0' compares identity; use ==
            value = str(solution[cur_row][cur_col])
            cv2.putText(
                img=image,
                text=value,
                org=org,
                fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                fontScale=1,
                color=(0, 255, 0),
                thickness=2)
        cur_col += 1
        if cur_col % 9 == 0:
            cur_row += 1
            cur_col = 0

    cv2.imshow('Solution', image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def close(self):
    self.cap.release()
    cv2.destroyAllWindows()
    print("Closing camera")
def close(self):
    cv2.destroyAllWindows()
    print("Closing window")
def main():
    parser = argparse.ArgumentParser(description='Visualizes the line for hough transform.')
    parser.add_argument('filename')

    args = parser.parse_args()

    img = cv2.imread(args.filename, cv2.IMREAD_GRAYSCALE)
    cv2.imshow('input', img)

    edge_finder = EdgeFinder(img, filter_size=13, threshold1=28, threshold2=115)

    print("Edge parameters:")
    print("GaussianBlur Filter Size: %f" % edge_finder.filterSize())
    print("Threshold1: %f" % edge_finder.threshold1())
    print("Threshold2: %f" % edge_finder.threshold2())

    (head, tail) = os.path.split(args.filename)
    (root, ext) = os.path.splitext(tail)

    smoothed_filename = os.path.join("output_images", root + "-smoothed" + ext)
    edge_filename = os.path.join("output_images", root + "-edges" + ext)

    cv2.imwrite(smoothed_filename, edge_finder.smoothedImage())
    cv2.imwrite(edge_filename, edge_finder.edgeImage())

    cv2.destroyAllWindows()
def show_image(image, scale=1.0, window_title='Image'):
    """Display given image in a window.

    Arguments:
        image () -- Image to display.
        scale (float) -- Magnification of image.
        window_title (str) -- Title of window.
    """
    scaled_image = scale_image(image, scale)
    cv2.imshow(window_title, scaled_image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def show_image(im, name='image'):
    cv2.imshow(name, im)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def showImage(img, caption='image'):
    cv2.imshow(caption, img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

# Matches a template of cross to detect inner grid lines and then removes them via flood filling
def threadConsumer(self):
    """
    Thread that consumes the frames, estimates the pose and displays it
    :return: None
    """
    while True:
        if self.stop.value:
            break
        try:
            frm = self.queue.get(block=False)
        except:
            if not self.stop.value:
                continue
            else:
                break

        startp = time.time()
        pose = self.estimatePose(frm['crop']) * self.config['cube'][2] / 2. + frm['com3D']
        print("{}ms pose".format((time.time() - startp) * 1000.))

        # Display the resulting frame
        starts = time.time()
        img = self.show(frm['frame'], pose, frm['M'])
        img = self.addStatusBar(img)
        cv2.imshow('frame', img)
        self.lastshow = time.time()
        self.processKey(cv2.waitKey(1) & 0xFF)
        print("{}ms display".format((time.time() - starts) * 1000.))

    cv2.destroyAllWindows()
    print("Exiting consumer...")
    return True
def show_bbox_landmark(list_file, path_data):
    with open(list_file, 'r') as f:
        annotations = f.readlines()
    num = len(annotations)
    print("%d pics in total" % num)
    # random.shuffle(annotations)
    for line in annotations:
        line_split = line.strip().split(' ')
        print(line_split[0])
        path_full = os.path.join(path_data, line_split[0])
        datum = cv2.imread(path_full)
        classes = float(line_split[1])
        bbox = [float(x) for x in line_split[2:6]]
        landmarks = [float(x) for x in line_split[6:]]
        print(classes)
        print(bbox)
        print(landmarks)
        (h, w, c) = datum.shape
        if bbox[0] != -1:
            x1 = bbox[0] * w
            y1 = bbox[1] * h
            x2 = bbox[2] * w + w
            y2 = bbox[3] * h + h
            cv2.rectangle(datum, (int(x1), int(y1)), (int(x2), int(y2)), (0, 255, 0), 1)
        if landmarks[0] != -1:
            for i in range(5):
                cv2.circle(datum, (int(landmarks[i] * w), int(landmarks[i + 5] * h)), 2, (255, 0, 0))
        cv2.imshow(str(line_split[0]), datum)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
def face_recognize(self):
    cap = cv2.VideoCapture(self.index)
    face_cascade = cv2.CascadeClassifier(self.cascade)
    '''
    face_cascade: cascade is entered here for further use.
    '''
    while(True):
        ret, frame = cap.read()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        '''
        Converts coloured video to black and white (grayscale).
        '''
        if np.any(face_cascade.detectMultiScale(gray, 1.3, 5)):
            print("Cascade found")
            self.dispatch('on_match')
            cv2.destroyAllWindows()
            for i in range(1, 5):
                cv2.waitKey(1)
            break
        else:
            print("Not recognized")
        cv2.imshow('frame', frame)
        # Comment out the statement above to hide the camera screen
        if cv2.waitKey(1) & 0xFF == ord('q'):
            print("Forcefully Closed")
            cv2.destroyAllWindows()
            for i in range(1, 5):
                cv2.waitKey(1)
            break
    cap.release()
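The destroyAllWindows()-then-waitKey(1) pairing in the example above is a common workaround: on some platforms the window is only actually torn down once HighGUI's event loop runs again, which the extra waitKey(1) calls provide. A small helper capturing that idiom (the function name is ours, not from the project above):

def close_all_windows():
    # ask HighGUI to destroy every window...
    cv2.destroyAllWindows()
    # ...then pump the event loop a few times so the
    # destroy requests are actually processed
    for _ in range(5):
        cv2.waitKey(1)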
def ToGrayImage(path):
    image = cv2.imread(path)
    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # cv2.imwrite('gray_image.jpg', gray_image)
    # cv2.imshow('color_image', image)
    # cv2.imshow('gray_image', gray_image)
    # cv2.waitKey(0)            # Waits forever for user to press any key
    # cv2.destroyAllWindows()   # Closes displayed windows
    return gray_image
def __init__(self, matric_num):
    WHITE = [255, 255, 255]
    face_cascade = cv2.CascadeClassifier('Haar/haarcascade_frontalcatface.xml')
    eye_cascade = cv2.CascadeClassifier('Haar/haarcascade_eye.xml')
    ID = NameFind.AddName(matric_num)
    Count = 0
    cap = cv2.VideoCapture(0)  # Camera object
    self.__trainer__ = None
    if not os.path.exists('dataSet'):
        os.makedirs('dataSet')
    while True:
        ret, img = cap.read()
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # Convert the camera frame to grayscale
        faces = face_cascade.detectMultiScale(gray, 1.3, 5)  # Detect the faces and store the positions
        for (x, y, w, h) in faces:  # Frames LOCATION X, Y WIDTH, HEIGHT
            # The face is isolated and cropped
            FaceImage = gray[y - int(h / 2): y + int(h * 1.5), x - int(x / 2): x + int(w * 1.5)]
            Img = (NameFind.DetectEyes(FaceImage))
            # putText needs integer coordinates, hence the floor division
            cv2.putText(gray, "FACE DETECTED", (x + w // 2, y - 5), cv2.FONT_HERSHEY_DUPLEX, .4, WHITE)
            if Img is not None:
                frame = Img  # Show the detected faces
            else:
                frame = gray[y: y + h, x: x + w]
            cv2.imwrite("dataSet/" + matric_num.replace('/', '') + "." + str(ID) + "." + str(Count) + ".jpg", frame)
            Count = Count + 1
            # cv2.waitKey(300)
            cv2.imshow("CAPTURED PHOTO", frame)  # show the captured image
        cv2.imshow('Face Recognition System Capture Faces', gray)  # Show the video
        if Count == 150:
            Trainer()
            break
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    print('FACE CAPTURE FOR THE SUBJECT IS COMPLETE')
    cap.release()
    cv2.destroyAllWindows()
def __init__(self):
    face_cascade = cv2.CascadeClassifier('Haar/haarcascade_frontalcatface.xml')
    eye_cascade = cv2.CascadeClassifier('Haar/haarcascade_eye.xml')
    recognise = cv2.face.createEigenFaceRecognizer(15, 4000)  # creating EIGEN FACE RECOGNISER
    recognise.load("Recogniser/trainingDataEigan.xml")  # Load the training data

    # ------------------------- START THE VIDEO FEED ------------------------------------------
    cap = cv2.VideoCapture(0)  # Camera object
    # cap = cv2.VideoCapture('TestVid.wmv')  # Video object
    ID = 0
    while True:
        ret, img = cap.read()  # Read the camera object
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # Convert the camera frame to gray
        faces = face_cascade.detectMultiScale(gray, 1.3, 5)  # Detect the faces and store the positions
        for (x, y, w, h) in faces:  # Frames LOCATION X, Y WIDTH, HEIGHT
            # ---- BY CONFIRMING THE EYES ARE INSIDE THE FACE, BETTER FACE RECOGNITION IS GAINED ----
            gray_face = cv2.resize((gray[y: y + h, x: x + w]), (110, 110))  # The face is isolated and cropped
            eyes = eye_cascade.detectMultiScale(gray_face)
            for (ex, ey, ew, eh) in eyes:
                ID, conf = recognise.predict(gray_face)  # Determine the ID of the photo
                NAME = NameFind.ID2Name(ID, conf)
                NameFind.DispID(x, y, w, h, NAME, gray)
        cv2.imshow('EigenFace Face Recognition System', gray)  # Show the video
        if cv2.waitKey(1) & 0xFF == ord('q'):  # Quit if the key is Q
            break
    cap.release()
    cv2.destroyAllWindows()