The following 50 code examples, extracted from open source Python projects, illustrate how to use cv2.WINDOW_NORMAL.
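For orientation before the examples: cv2.WINDOW_NORMAL creates a user-resizable window, whereas the default cv2.WINDOW_AUTOSIZE locks the window to the image dimensions; it is therefore usually paired with cv2.resizeWindow, as many of the examples below do. A minimal sketch of the common pattern (the file name example.jpg is a placeholder):

import cv2

# WINDOW_NORMAL makes the window resizable by the user and by resizeWindow;
# the default WINDOW_AUTOSIZE would fix the window to the image size.
img = cv2.imread("example.jpg")  # placeholder path
cv2.namedWindow("preview", cv2.WINDOW_NORMAL)
cv2.resizeWindow("preview", 800, 600)  # only meaningful with WINDOW_NORMAL
cv2.imshow("preview", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
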
def rgb_callback(self, data):
    try:
        self.rgb_img = self.br.imgmsg_to_cv2(data, "bgr8")
    except CvBridgeError as e:
        print(e)
    gray = cv2.cvtColor(self.rgb_img, cv2.COLOR_BGR2GRAY)
    rgb_ret, rgb_corners = cv2.findChessboardCorners(gray, (x_num, y_num), None)
    cv2.namedWindow('rgb_img', cv2.WINDOW_NORMAL)
    cv2.imshow('rgb_img', self.rgb_img)
    cv2.waitKey(5)
    if rgb_ret == True:
        rgb_tempimg = self.rgb_img.copy()
        cv2.cornerSubPix(gray, rgb_corners, (5, 5), (-1, -1), criteria)
        cv2.drawChessboardCorners(rgb_tempimg, (x_num, y_num), rgb_corners, rgb_ret)
        rgb_rvec, self.rgb_tvec, rgb_inliers = cv2.solvePnPRansac(objpoints, rgb_corners, rgb_mtx, rgb_dist)
        self.rgb_rmat, _ = cv2.Rodrigues(rgb_rvec)
        print("The world coordinate system's origin in camera's coordinate system:")
        print("===rgb_camera rvec:")
        print(rgb_rvec)
        print("===rgb_camera rmat:")
        print(self.rgb_rmat)
        print("===rgb_camera tvec:")
        print(self.rgb_tvec)
        print("rgb_camera_shape: ")
        print(self.rgb_img.shape)
        print("The camera origin in world coordinate system:")
        print("===camera rmat:")
        print(self.rgb_rmat.T)
        print("===camera tvec:")
        print(-np.dot(self.rgb_rmat.T, self.rgb_tvec))
        rgb_stream = open("/home/chentao/kinect_calibration/rgb_camera_pose.yaml", "w")
        data = {'rmat': self.rgb_rmat.tolist(), 'tvec': self.rgb_tvec.tolist()}
        yaml.dump(data, rgb_stream)
        cv2.imshow('rgb_img', rgb_tempimg)
        cv2.waitKey(5)

def run(self): cv2.namedWindow("display", cv2.WINDOW_NORMAL) cv2.setMouseCallback("display", self.opencv_calibration_node.on_mouse) cv2.createTrackbar("scale", "display", 0, 100, self.opencv_calibration_node.on_scale) if self.extra_queue: cv2.namedWindow("extra", cv2.WINDOW_NORMAL) while True: # wait for an image (could happen at the very beginning when the queue is still empty) while len(self.queue) == 0: time.sleep(0.1) im = self.queue[0] cv2.imshow("display", im) k = cv2.waitKey(6) & 0xFF if k in [27, ord('q')]: rospy.signal_shutdown('Quit') elif k == ord('s'): self.opencv_calibration_node.screendump(im) if self.extra_queue: if len(self.extra_queue): extra_img = self.extra_queue[0] cv2.imshow("extra", extra_img)
def ir_callback(self, data):
    try:
        self.ir_img = self.mkgray(data)
    except CvBridgeError as e:
        print(e)
    ir_ret, ir_corners = cv2.findChessboardCorners(self.ir_img, (x_num, y_num))
    cv2.namedWindow('ir_img', cv2.WINDOW_NORMAL)
    cv2.imshow('ir_img', self.ir_img)
    cv2.waitKey(5)
    if ir_ret == True:
        ir_tempimg = self.ir_img.copy()
        cv2.cornerSubPix(ir_tempimg, ir_corners, (11, 11), (-1, -1), criteria)
        cv2.drawChessboardCorners(ir_tempimg, (x_num, y_num), ir_corners, ir_ret)
        # ret, rvec, tvec = cv2.solvePnP(objpoints, corners, mtx, dist, flags = cv2.CV_EPNP)
        ir_rvec, self.ir_tvec, ir_inliers = cv2.solvePnPRansac(objpoints, ir_corners, depth_mtx, depth_dist)
        self.ir_rmat, _ = cv2.Rodrigues(ir_rvec)
        print("The world coordinate system's origin in camera's coordinate system:")
        print("===ir_camera rvec:")
        print(ir_rvec)
        print("===ir_camera rmat:")
        print(self.ir_rmat)
        print("===ir_camera tvec:")
        print(self.ir_tvec)
        print("ir_camera_shape: ")
        print(self.ir_img.shape)
        print("The camera origin in world coordinate system:")
        print("===camera rmat:")
        print(self.ir_rmat.T)
        print("===camera tvec:")
        print(-np.dot(self.ir_rmat.T, self.ir_tvec))
        depth_stream = open("/home/chentao/kinect_calibration/ir_camera_pose.yaml", "w")
        data = {'rmat': self.ir_rmat.tolist(), 'tvec': self.ir_tvec.tolist()}
        yaml.dump(data, depth_stream)
        cv2.imshow('ir_img', ir_tempimg)
        cv2.waitKey(5)

def get_start_points(image):
    window = cv2.namedWindow(MAZE_NAME, cv2.WINDOW_NORMAL)
    cv2.resizeWindow(MAZE_NAME, 900, 900)
    cv2.imshow(MAZE_NAME, image)
    cv2.moveWindow(MAZE_NAME, 100, 100)
    print("Press 'A' to use default start and end points, or press 'S' to choose your own")
    while True:
        key = cv2.waitKey(0)
        if key == ord('a'):
            print("Using Default Start and End Points")
            imageProcessor = ImageProcessor(image)
            start_x, start_y = imageProcessor.getDefaultStart(image)
            end_x, end_y = imageProcessor.getDefaultEnd(image)
            print("Start Point: {0}, End Point: {1}".format((start_x, start_y), (end_x, end_y)))
            break
        elif key == ord('s'):
            print("Please select a start point")
            start_x, start_y = get_user_selected_point(image)
            print("Start Point: {0}, please select an end point".format((start_x, start_y)))
            end_x, end_y = get_user_selected_point(image)
            print("End Point: {0}".format((end_x, end_y)))
            break
        else:
            print("Invalid")
            continue
    cv2.destroyAllWindows()
    return start_x, start_y, end_x, end_y

def cvCaptureVideo():
    capture = cv2.VideoCapture(0)
    if capture.isOpened() is False:
        raise IOError("IO Error")
    cv2.namedWindow("Capture", cv2.WINDOW_NORMAL)
    while True:
        ret, image = capture.read()
        if ret == False:
            continue
        cv2.imshow("Capture", image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    capture.release()
    cv2.destroyAllWindows()

def run(self): print("VEDIO server starts...") self.sock.bind(self.ADDR) self.sock.listen(1) conn, addr = self.sock.accept() print("remote VEDIO client success connected...") data = "".encode("utf-8") payload_size = struct.calcsize("L") cv2.namedWindow('Remote', cv2.WINDOW_NORMAL) while True: while len(data) < payload_size: data += conn.recv(81920) packed_size = data[:payload_size] data = data[payload_size:] msg_size = struct.unpack("L", packed_size)[0] while len(data) < msg_size: data += conn.recv(81920) zframe_data = data[:msg_size] data = data[msg_size:] frame_data = zlib.decompress(zframe_data) frame = pickle.loads(frame_data) cv2.imshow('Remote', frame) if cv2.waitKey(1) & 0xFF == 27: break
def evaluate_images():
    ssd = SSD()
    cv2.namedWindow("outputs", cv2.WINDOW_NORMAL)
    loader = coco.Loader(False)
    test_batches = loader.create_batches(1, shuffle=True)
    global i2name
    i2name = loader.i2name
    while True:
        batch = test_batches.next()
        imgs, anns = loader.preprocess_batch(batch)
        pred_labels_f, pred_locs_f, step = ssd.sess.run(
            [ssd.pred_labels, ssd.pred_locs, ssd.global_step],
            feed_dict={ssd.imgs_ph: imgs, ssd.bn: False})
        boxes_, confidences_ = matcher.format_output(pred_labels_f[0], pred_locs_f[0])
        draw_outputs(imgs[0], boxes_, confidences_, wait=0)

def show_webcam(address):
    cam = webcam.WebcamStream(address)
    cam.start_stream_threads()
    ssd = SSD()
    global i2name
    i2name = pickle.load(open("i2name.p", "rb"))
    cv2.namedWindow("outputs", cv2.WINDOW_NORMAL)
    boxes_ = None
    confidences_ = None
    while True:
        sample = cam.image
        resized_img = skimage.transform.resize(sample, (image_size, image_size))
        pred_labels_f, pred_locs_f = ssd.sess.run(
            [ssd.pred_labels, ssd.pred_locs],
            feed_dict={ssd.imgs_ph: [resized_img], ssd.bn: False})
        boxes_, confidences_ = matcher.format_output(pred_labels_f[0], pred_locs_f[0], boxes_, confidences_)
        resize_boxes(resized_img, sample, boxes_)
        draw_outputs(np.asarray(sample) / 255.0, boxes_, confidences_, wait=10)

def run_main():
    # First initialize the camera capture object with the cv2.VideoCapture class.
    camera = cv2.VideoCapture(camera_port)
    # To create a new window for display
    # Disabled it for now as we can't see the image on the cap
    # cv2.namedWindow(display, cv2.WINDOW_NORMAL)
    # Discard the first few frames to adjust the camera light levels
    for i in range(ramp_frames):
        temp = get_image(camera)
    # Now take the image and save it after every 1 second
    while True:
        try:
            time.sleep(1)
            take_save_image(camera)
        except Exception as e:
            print("Exception occurred\n")
            print(e)

def setupWindow():
    filename = getUserSelectedImage()
    imageProcessor = ImageProcessor(cv2.imread(filename, 0))
    colourImage = cv2.imread(filename, 1)
    image = imageProcessor.getThresholdedImage(False)
    granularity = imageProcessor.get_granularity(image, 100)
    print("Granularity: {0}".format(granularity))
    start_x, start_y, end_x, end_y = get_start_points(image)
    image = imageProcessor.encloseMaze(image)
    mazerunner = MazeSolver.MazeSolver(image, granularity)
    solution = mazerunner.solveMaze(start_x, start_y, end_x, end_y)
    if not solution:
        cv2.imshow(MAZE_NAME, image)
    else:
        solvedImage = draw_solution(solution, colourImage)
        solvedImage = imageProcessor.mark_point((end_x, end_y), 3, (255, 0, 0), solvedImage)
        solvedImage = imageProcessor.mark_point((start_x, start_y), 3, (255, 0, 0), solvedImage)
        window = cv2.namedWindow("Solved Image", cv2.WINDOW_NORMAL)
        cv2.resizeWindow("Solved Image", 900, 900)
        cv2.moveWindow("Solved Image", 100, 100)
        cv2.imshow("Solved Image", solvedImage)
    print("Press any key to exit")
    cv2.waitKey(0)
    cv2.destroyAllWindows()

def display(win_name, img):
    cv2.namedWindow(win_name, cv2.WINDOW_NORMAL)
    cv2.imshow(win_name, numpy.asarray(img[:, :]))
    k = -1
    while k == -1:
        k = waitkey()
    cv2.destroyWindow(win_name)
    if k in [27, ord('q')]:
        rospy.signal_shutdown('Quit')

def cvShowImageColor(image_file):
    image_bgr = cv2.imread(image_file)
    cv2.namedWindow('image', cv2.WINDOW_NORMAL)
    cv2.imshow('image', image_bgr)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

def cvShowImageGray(image_file):
    image_gray = cv2.imread(image_file, 0)
    cv2.namedWindow('image', cv2.WINDOW_NORMAL)
    cv2.imshow('image', image_gray)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

def run(self): print ("VEDIO server starts...") while True: try: self.sock.connect(self.ADDR) break except: time.sleep(3) continue print ("video server <-> remote server success connected...") check = "F" check = self.sock.recv(1) if check.decode("utf-8") != "S": return data = "".encode("utf-8") payload_size = struct.calcsize("L") cv2.namedWindow('Remote', cv2.WINDOW_NORMAL) while True: while len(data) < payload_size: data += self.sock.recv(81920) packed_size = data[:payload_size] data = data[payload_size:] msg_size = struct.unpack("L", packed_size)[0] while len(data) < msg_size: data += self.sock.recv(81920) zframe_data = data[:msg_size] data = data[msg_size:] frame_data = zlib.decompress(zframe_data) frame = pickle.loads(frame_data) try: cv2.imshow('Remote', frame) except: pass if cv2.waitKey(1) & 0xFF == 27: break
def run(self): print ("VEDIO client starts...") while True: try: self.sock.connect(self.ADDR) break except: time.sleep(3) continue print ("video client <-> remote server success connected...") check = "F" check = self.sock.recv(1) if check.decode("utf-8") != "S": return print ("receive authend") #self.cap = cv2.VideoCapture(0) self.cap = cv2.VideoCapture("test.mp4") if self.showme: cv2.namedWindow('You', cv2.WINDOW_NORMAL) print ("remote VEDIO client connected...") while self.cap.isOpened(): ret, frame = self.cap.read() if self.showme: cv2.imshow('You', frame) if cv2.waitKey(1) & 0xFF == 27: self.showme = False cv2.destroyWindow('You') if self.level > 0: frame = cv2.resize(frame, (0,0), fx=self.fx, fy=self.fx) data = pickle.dumps(frame) zdata = zlib.compress(data, zlib.Z_BEST_COMPRESSION) try: self.sock.sendall(struct.pack("L", len(zdata)) + zdata) print("video send ", len(zdata)) except: break for i in range(self.interval): self.cap.read()
def run(self):
    while True:
        try:
            self.sock.connect(self.ADDR)
            break
        except:
            time.sleep(3)
            continue
    if self.showme:
        cv2.namedWindow('You', cv2.WINDOW_NORMAL)
    print("VIDEO client connected...")
    while self.cap.isOpened():
        ret, frame = self.cap.read()
        if self.showme:
            cv2.imshow('You', frame)
            if cv2.waitKey(1) & 0xFF == 27:
                self.showme = False
                cv2.destroyWindow('You')
        sframe = cv2.resize(frame, (0, 0), fx=self.fx, fy=self.fx)
        data = pickle.dumps(sframe)
        zdata = zlib.compress(data, zlib.Z_BEST_COMPRESSION)
        try:
            self.sock.sendall(struct.pack("L", len(zdata)) + zdata)
        except:
            break
        for i in range(self.interval):
            self.cap.read()

def __init__(self, video_src=0, interactive=True, video='vss.avi',
             fallback='synth:bg=./data/hi.jpg:noise=0.05',
             save_frames=False, frame_dir='./data', frame_prefix='frame-'):
    cam = create_capture(video_src, fallback=fallback)
    vwriter = None
    if interactive:
        print("q to quit")
        window = 'video square search'
        cv2.namedWindow(window, cv2.WINDOW_NORMAL)
    else:
        vwriter = VideoWriter(video)
    run = True
    t = clock()
    frameCounter = 0
    while run:
        ret, img = cam.read()
        if interactive:
            print("read next image")
        squares = find_squares(img, 0.2)
        nOfSquares = len(squares)
        cv2.drawContours(img, squares, -1, (0, 255, 0), 3)
        dt = clock() - t
        draw_str(img, (80, 80), 'time: %.1f ms found = %d' % (dt*1000, nOfSquares), 2.0)
        if interactive:
            cv2.imshow(window, img)
            print('q to quit, n for next')
            ch = 0xff & cv2.waitKey(100)
            if ch == ord('q'):
                run = False
            elif ch == ord('n'):
                continue
        else:
            vwriter.addFrame(img)
        if save_frames:
            fn = os.path.join(frame_dir, '%s-%04d.%s' % (frame_prefix, frameCounter, 'jpg'))
            print("Saving %d frame to %s" % (frameCounter, fn))
            cv2.imwrite(fn, img)
            frameCounter += 1
    if vwriter:
        vwriter.finalise()

def process_image(self, cv_image, header, tag):
    """ process the image """
    objects = self.cascade.detectMultiScale(cv_image)
    for obj in objects:
        cv2.rectangle(cv_image, (obj[0], obj[1]),
                      (obj[0] + obj[2], obj[1] + obj[3]), (0, 255, 0))
    cv2.namedWindow(tag, cv2.WINDOW_NORMAL)
    cv2.resizeWindow(tag, 600, 600)
    cv2.imshow(tag, cv_image)
    cv2.waitKey(1)

def look_for_exit(self, image, debug=False, unwarped=True):
    """ Look for an exit from our position """
    for row in range(1, image.shape[0] // 10):
        img = image.copy()
        base_row = (row * 10) + 5
        image_dump = os.environ.get("ZARJ_IMAGE_DUMP")
        markup = debug or (image_dump is not None)
        output = self.figure_path(img, base_row, markup, True)
        if output is not None and output['right'] is not None and \
           output['left'] is not None:
            real_distance = self.unwarp_perspective((image.shape[1] // 2, base_row))
            if unwarped:
                base_row = real_distance
            if markup:
                txt = "({0:.2f})".format(real_distance)
                cv2.putText(img, txt, (0, img.shape[0] - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255))
            if debug:
                name = "_exit_decision_"
                cv2.namedWindow(name, cv2.WINDOW_NORMAL)
                cv2.resizeWindow(name, 500, 250)
                cv2.imshow(name, img)
                cv2.waitKey(1)
            if image_dump is not None:
                cv2.imwrite(image_dump + '/exit_{0}.png'.format(debug_sequence()), img)
            if real_distance > 1.8:
                log('Wait a second! An exit {} away is too far away'.format(real_distance))
                return None
            return base_row
    return None

def process_image(self, cv_image, header, tag):
    """ process the image """
    cv2.namedWindow("watching:" + tag, cv2.WINDOW_NORMAL)
    cv2.resizeWindow("watching:" + tag, 500, 250)
    cv2.imshow("watching:" + tag, cv_image)
    cv2.waitKey(1)

def process_image(self, cv_image, header, tag):
    """ process the image """
    hsv = cv2.cvtColor(cv_image, cv2.COLOR_BGR2HSV)
    # mask for color range
    if self.color_range:
        mask = cv2.inRange(hsv, self.color_range[0], self.color_range[1])
        count = cv2.countNonZero(mask)
        if count:
            kernel = np.ones((5, 5), np.uint8)
            mask = cv2.dilate(mask, kernel, iterations=2)
            contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
            for i, c in enumerate(contours):
                x, y, w, h = cv2.boundingRect(c)
                if self.prefix is not None:
                    name = '{0}{1}_{2}_{3}.png'.format(self.prefix, tag, header.seq, i)
                    print(name)
                    roi = cv_image[y:y+h, x:x+w]
                    gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
                    gray = cv2.equalizeHist(gray)
                    cv2.imwrite(name, gray)
            for c in contours:
                x, y, w, h = cv2.boundingRect(c)
                cv2.rectangle(cv_image, (x, y), (x+w, y+h), (0, 255, 0))
        elif self.prefix is not None:
            name = '{0}Negative_{1}_{2}.png'.format(self.prefix, tag, header.seq)
            cv2.imwrite(name, cv_image)
    cv2.namedWindow(tag, cv2.WINDOW_NORMAL)
    cv2.resizeWindow(tag, 600, 600)
    cv2.imshow(tag, cv_image)
    cv2.waitKey(1)

def show_webcam_and_run(model, emoticons, window_size=None, window_name='webcam', update_time=10):
    """
    Shows the webcam image, detects faces and their emotions in real time, and draws emoticons over the faces.

    :param model: Learnt emotion detection model.
    :param emoticons: List of emotion images.
    :param window_size: Size of webcam image window.
    :param window_name: Name of webcam image window.
    :param update_time: Image update time interval.
    """
    cv2.namedWindow(window_name, WINDOW_NORMAL)
    if window_size:
        width, height = window_size
        cv2.resizeWindow(window_name, width, height)
    vc = cv2.VideoCapture(0)
    if vc.isOpened():
        read_value, webcam_image = vc.read()
    else:
        print("webcam not found")
        return
    while read_value:
        for normalized_face, (x, y, w, h) in find_faces(webcam_image):
            prediction = model.predict(normalized_face)  # do prediction
            if cv2.__version__ != '3.1.0':
                prediction = prediction[0]
            image_to_draw = emoticons[prediction]
            draw_with_alpha(webcam_image, image_to_draw, (x, y, w, h))
        cv2.imshow(window_name, webcam_image)
        read_value, webcam_image = vc.read()
        key = cv2.waitKey(update_time)
        if key == 27:  # exit on ESC
            break
    cv2.destroyWindow(window_name)

def showImage(img, width, height):
    screen_res = width, height
    scale_width = screen_res[0] / img.shape[1]
    scale_height = screen_res[1] / img.shape[0]
    scale = min(scale_width, scale_height)
    window_width = int(img.shape[1] * scale)
    window_height = int(img.shape[0] * scale)
    cv2.namedWindow('dst_rt', cv2.WINDOW_NORMAL)
    cv2.resizeWindow('dst_rt', window_width, window_height)
    cv2.imshow('dst_rt', img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

def main():
    assert len(sys.argv) > 1, "Pass image file as argument."
    I = cv2.imread(sys.argv[-1])
    assert I is not None, "Failed to open image."
    cv2.namedWindow("cat", cv2.WINDOW_NORMAL)
    cv2.imshow("cat", I)
    cv2.waitKey(0)
    J = pyflipping.vertflip(I)
    cv2.imshow("cat", J)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

def main(): cv2.namedWindow("segment", cv2.WINDOW_NORMAL) db = speedlib.SpeedSegmentsDatabase.get_instance() for key in db.segments: seg = db.segments[key] cv2.imshow("segment", seg.get_image()) cv2.waitKey(100) label = raw_input("Enter label [current label: '%s']: " % (str(seg.label),)) seg.label = label db.save() print("Finished.")
def main(): cv2.namedWindow("speed", cv2.WINDOW_NORMAL) cv2.namedWindow("speed_bin", cv2.WINDOW_NORMAL) def on_route_advisor_visible(game, scr): speed_image = game.win.get_speed_image(scr) som = speedlib.SpeedOMeter(speed_image) speed_image_bin = som.get_postprocessed_image() cv2.imshow("speed", speed_image) cv2.imshow("speed_bin", speed_image_bin.astype(np.uint8)*255) cv2.waitKey(1) segments = som.split_to_segments() print("Found %d segments" % (len(segments),), [s.arr.shape for s in segments]) for seg in segments: if not seg.is_in_database(): speedlib.SpeedSegmentsDatabase.get_instance().add(seg) print("Added new segment with key: %s" % (seg.get_key(),)) else: print("Segment already in database.") game = ets2game.ETS2Game() game.on_route_advisor_visible = on_route_advisor_visible game.run()
def __init__(self, fn):
    self.img = cv2.imread(fn)
    if self.img is None:
        raise Exception('Failed to load image file: %s' % fn)
    h, w = self.img.shape[:2]
    self.markers = np.zeros((h, w), np.int32)
    self.markers_vis = self.img.copy()
    self.cur_marker = 1
    self.colors = np.int32(list(np.ndindex(2, 2, 2))) * 255
    self.auto_update = True
    cv2.namedWindow('img', cv2.WINDOW_NORMAL)
    cv2.moveWindow('img', 300, 200)
    self.sketch = Sketcher('img', [self.markers_vis, self.markers], self.get_colors)
    self.returnVar = self.markers.copy()

def watershed(self):
    m = self.markers.copy()
    cv2.watershed(self.img, m)
    self.returnVar = m.copy()
    overlay = self.colors[np.maximum(m, 0)]
    vis = cv2.addWeighted(self.img, 0.5, overlay, 0.5, 0.0, dtype=cv2.CV_8UC3)
    cv2.namedWindow('watershed', cv2.WINDOW_NORMAL)
    cv2.moveWindow('watershed', 780, 200)
    cv2.imshow('watershed', vis)

def add_person(people_folder, shape):
    """ Function to add pictures of a person """
    person_name = input('What is the name of the new person: ').lower()
    folder = people_folder + person_name
    if not os.path.exists(folder):
        input("I will now take 20 pictures. Press ENTER when ready.")
        os.mkdir(folder)
        video = VideoCamera()
        detector = FaceDetector('face_recognition_system/frontal_face.xml')
        counter = 1
        timer = 0
        cv2.namedWindow('Video Feed', cv2.WINDOW_AUTOSIZE)
        cv2.namedWindow('Saved Face', cv2.WINDOW_NORMAL)
        while counter < 21:
            frame = video.get_frame()
            face_coord = detector.detect(frame)
            if len(face_coord):
                frame, face_img = get_images(frame, face_coord, shape)
                # save a face every second; we start from an offset '5' because
                # the first frames of the camera get very high intensity readings
                if timer % 100 == 5:
                    cv2.imwrite(folder + '/' + str(counter) + '.jpg', face_img[0])
                    print('Images Saved:' + str(counter))
                    counter += 1
                    cv2.imshow('Saved Face', face_img[0])
            cv2.imshow('Video Feed', frame)
            cv2.waitKey(50)
            timer += 5
    else:
        print("This name already exists.")
        sys.exit()

def generateMask(img):
    blured_img = getBlurImage(img)
    hsv, hue, sat, val = getHSVImage(blured_img)
    # binary_img_hsv = binaryIMG(blured_img)
    cv2.namedWindow('hsv', cv2.WINDOW_NORMAL)
    cv2.imshow("hsv", hsv)
    hsv += 35
    cv2.namedWindow('hsv-inc', cv2.WINDOW_NORMAL)
    cv2.imshow("hsv-inc", hsv)
    gray = getBinaryImage(hsv)
    filtered_img = getFilterImage(gray)
    opening_img = getOpeningImage(filtered_img)
    closing_img = getClosingImage(opening_img)
    canny_edge_img = autoCannyEdgeDetection(closing_img)
    contours, contour_img = getContourImage(canny_edge_img)
    binary_line_img = drawLines(contour_img, contours)
    # convex_img = drawConvexHull(hsv, contours)
    dilation_img = getDilationImage(binary_line_img)
    cv2.namedWindow('gray', cv2.WINDOW_NORMAL)
    cv2.imshow("gray", gray)
    cv2.namedWindow('canny-edge', cv2.WINDOW_NORMAL)
    cv2.imshow("canny-edge", canny_edge_img)
    cv2.namedWindow('contour', cv2.WINDOW_NORMAL)
    cv2.imshow("contour", contour_img)
    cv2.namedWindow('dilation', cv2.WINDOW_NORMAL)
    cv2.imshow("dilation", dilation_img)
    # cv2.namedWindow('convex', cv2.WINDOW_NORMAL)
    # cv2.imshow("convex", convex_img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    return dilation_img

def main(): img = cv2.imread("../data/aug/train/benign/ISIC_0000201.jpg_aug0.jpg") # img = cv2.imread("../data/aug/train/benign/ISIC_0000011.jpg_aug0.jpg") # img = cv2.imread("../data/aug/train/benign/ISIC_0000113.jpg_aug0.jpg") # img = cv2.imread("../data/aug/train/benign/ISIC_0009344.jpg_aug12.jpg") mask = generateMask(img) final = extractRegion(img, mask) cv2.namedWindow('final', cv2.WINDOW_NORMAL) cv2.imshow("final", final) cv2.waitKey(0) cv2.destroyAllWindows()
def get_image_detections(path):
    ssd = SSD()
    global i2name
    i2name = pickle.load(open("i2name.p", "rb"))
    # cv2.namedWindow("outputs", cv2.WINDOW_NORMAL)
    sample = io.imread(path)[:, :, :3]
    boxes_, confidences_ = ssd.single_image(sample)
    return boxes_, confidences_

def video():
    global face_token
    ft = cv2.freetype.createFreeType2()
    ft.loadFontData(fontFileName='./data/font/simhei.ttf', id=0)
    face_cascade = cv2.CascadeClassifier('./data/cascades/haarcascade_frontalface_alt.xml')
    camera = cv2.VideoCapture(0)
    count = 0
    while True:
        ret, frame = camera.read()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, 1.3, 5)
        for (x, y, w, h) in faces:
            img = cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 255), 2)
            if count % 5 < 2:
                f = cv2.resize(gray[y:y+h, x:x+w], (200, 200))
                cv2.imwrite('./data/temp/temp.pgm', f)
                result = FaceAPI.searchItoI(image_file='./data/temp/temp.pgm')
                if len(result) == 4:
                    break
                if result["results"][0]["confidence"] >= 80.00:
                    print(result["results"][0]["confidence"])
                    face_token = result["results"][0]["face_token"]
                    detail = get_detail()
                    # shutil.copyfile("./data/temp/temp.pgm", "./data/at/%s/%s.pgm" % (detail, time.strftime('%Y%m%d%H%M%S')))
                    print(detail)
                    ft.putText(img=img, text=detail[1], org=(x, y - 10), fontHeight=60,
                               line_type=cv2.LINE_AA, color=(0, 255, 165), thickness=2,
                               bottomLeftOrigin=True)
                    # count += 1
                else:
                    print("Unknown face")
                    cv2.putText(img, "Unknown", (x, y - 10),
                                cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 225), 2)
        count += 1
        print(count)
        cv2.namedWindow("image", cv2.WINDOW_NORMAL)
        cv2.imshow("image", frame)
        if cv2.waitKey(1000 // 12) & 0xff == ord("q"):
            break
    camera.release()
    cv2.destroyAllWindows()

def get_seg_result(self, det_preds, imgs, imgname_list, stride=128):
    with tf.Graph().as_default():
        image_batch = tf.placeholder(dtype=tf.float32, shape=[None, stride, stride, 3],
                                     name='image_batch_seg')
        is_training = tf.placeholder(tf.bool, shape=[], name='is_training')
        with tf.variable_scope('Seg_Net'):
            seg_net = Seg_Net()
            logits = seg_net.inference(image_batch, is_training)
            seg_pred = seg_net.eval(logits=logits)
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            saver = tf.train.Saver()
            saver.restore(sess, SEG_MODEL_PATH)
            print("Restored model parameters from {}".format(SEG_MODEL_PATH))
            all_patch_np_list, all_patch_cood_list = self._get_seg_data(det_preds, imgs, stride)
            for i, (patchs, coods) in enumerate(zip(all_patch_np_list, all_patch_cood_list)):
                num = int(math.ceil(float(patchs.shape[0]) / SEG_BATCH_SIZE))
                mask = np.zeros(shape=(imgs.shape[1], imgs.shape[2]), dtype=np.uint8)
                for j in range(num):
                    start = j * SEG_BATCH_SIZE
                    end = min((j + 1) * SEG_BATCH_SIZE, patchs.shape[0])
                    input_batch = patchs[start:end]
                    input_coods = coods[start:end]
                    seg_preds, = sess.run([seg_pred],
                                          feed_dict={image_batch: input_batch, is_training: False})
                    print('seg_preds.shape', seg_preds.shape)
                    seg_preds = np.squeeze(seg_preds, axis=3)
                    for k in range(seg_preds.shape[0]):
                        y_start = input_coods[k][0]
                        x_start = input_coods[k][1]
                        mask[y_start*stride:(y_start+1)*stride,
                             x_start*stride:(x_start+1)*stride] = seg_preds[k]
                mask[np.where(mask == 1)] = 255
                # cv2.namedWindow('mask', cv2.WINDOW_NORMAL)
                # cv2.imshow('mask', mask)
                # cv2.waitKey(0)
                if not os.path.exists(SAVE_SEG_DIR):
                    os.makedirs(SAVE_SEG_DIR)
                cv2.imwrite(os.path.join(SAVE_SEG_DIR, imgname_list[i]), mask)

def main():
    root = './resize_4096'
    save_dir = './generate_defect1_again'
    dataset_generate(root, save_dir)
    # cv2.imshow("img", img)
    # cv2.waitKey(0)

# def main():
#     img = cv2.imread('./def_1.png', cv2.cv.CV_LOAD_IMAGE_GRAYSCALE)
#     print img.shape
#     cv2.namedWindow('label', cv2.WINDOW_NORMAL)
#     cv2.namedWindow('img', cv2.WINDOW_NORMAL)
#     # label = generate_defect_img(img, 1, 5)
#     label_img = np.zeros_like(img)
#     # generate_blur(img, label_img, 1, (0.05, 0.1), (0.05, 0.1))
#     # generate_scratch(img, label_img, 1, (0.0001, 0.002), 5, (0.005, 0.4))
#     # generate_spot(img, label_img, 1, (0.0001, 0.0005), 2)
#     cv2.imshow("label", label_img)
#     cv2.imshow("img", img)
#     cv2.waitKey(0)

def ViewScreenCaptureStream(self):
    # not developed much; more work is required for this to be fully functional
    frames = []
    frameFileNames = [fN for fN in self.ftp.nlst("\\" + self.directories[self.dirNum] + "\\vv")
                      if fN != "s.mm"]
    if frameFileNames:
        for fileName in frameFileNames:
            retrievedData = []
            self.ftp.retrbinary('RETR ' + "\\" + self.directories[self.dirNum] + "\\vv\\" + fileName,
                                retrievedData.append)
            tempBuff = StringIO.StringIO()
            tempBuff.write(XorText("".join(retrievedData), self.xorMap))
            tempBuff.seek(0)  # need to jump back to the beginning before handing it off to PIL
            printscreen_pil = Image.open(tempBuff)
            printscreen_pil = printscreen_pil.resize(
                (printscreen_pil.size[0], printscreen_pil.size[1]), Image.ANTIALIAS)
            frame = np.array(printscreen_pil.getdata(), dtype=np.uint8).reshape(
                (printscreen_pil.size[1], printscreen_pil.size[0], 3))
            # frames.append(frame)
            cv2.namedWindow("window", cv2.WINDOW_NORMAL)
            cv2.imshow('window', frame)
            # cv2.resizeWindow('window', 200, 200)
            if cv2.waitKey(0) & 0xFF == ord('q'):
                cv2.destroyAllWindows()
                break
    else:
        print("No frames available")
        return
    '''
    for frame in frames:
        cv2.namedWindow("window", cv2.WINDOW_NORMAL)
        cv2.imshow('window', frame)
        # cv2.resizeWindow('window', 200, 200)
        if cv2.waitKey(0) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            break
    '''

def main():
    # Read in image with the shape (rows, cols, channels)
    im = imread('./lena.small.jpg')
    im = np.array(im) / 255.
    invSpatialStdev = float(1. / 5.)
    invColorStdev = float(1. / .125)
    # Construct the position vectors out of x, y, r, g, and b.
    positions = np.zeros((im.shape[0], im.shape[1], 5), dtype='float32')
    for r in range(im.shape[0]):
        for c in range(im.shape[1]):
            positions[r, c, 0] = invSpatialStdev * c
            positions[r, c, 1] = invSpatialStdev * r
            positions[r, c, 2] = invColorStdev * im[r, c, 0]
            positions[r, c, 3] = invColorStdev * im[r, c, 1]
            positions[r, c, 4] = invColorStdev * im[r, c, 2]
    out = PermutohedralLattice.filter(im, positions)
    logging.info('Done')
    out -= out.min()
    out /= out.max()
    im -= im.min()
    im /= im.max()
    out = cv2.cvtColor(out.astype('float32'), cv2.COLOR_RGB2BGR)
    im = cv2.cvtColor(im.astype('float32'), cv2.COLOR_RGB2BGR)
    cv2.namedWindow('original', cv2.WINDOW_NORMAL)
    cv2.namedWindow('filtered', cv2.WINDOW_NORMAL)
    cv2.imshow('original', im)
    cv2.imshow('filtered', out)
    cv2.waitKey(0)

def evaluate_images():
    tb = TB()
    cv2.namedWindow("outputs", cv2.WINDOW_NORMAL)
    test_loader = sLoader.SVT('./svt1/train.xml', './svt1/test.xml')
    while True:
        imgs, anns = test_loader.nextBatch(3, 'test')
        pred_labels_f, pred_locs_f, step = tb.sess.run(
            [tb.pred_labels, tb.pred_locs, tb.global_step],
            feed_dict={tb.imgs_ph: imgs, tb.bn: False})
        boxes_, confidences_ = matcher.format_output(pred_labels_f[0], pred_locs_f[0])
        draw_outputs(imgs[0], boxes_, confidences_, wait=0)

def get_image_detections(path):
    tb = TB()
    # cv2.namedWindow("outputs", cv2.WINDOW_NORMAL)
    sample = cv2.imread(path)[:, :, :3]
    sample = cv2.resize(sample, (300, 300))
    boxes_, confidences_ = tb.single_image(sample)
    return boxes_, confidences_

def show_image(img, ms=0):
    cv2.namedWindow("Window", cv2.WINDOW_NORMAL)
    cv2.imshow("Window", img)
    cv2.waitKey(ms)
    cv2.destroyAllWindows()

def Crop(img, title):
    def onmouse(event, x, y, flags, param):
        global ix, iy, roi, drawing
        # Draw rectangle
        if event == cv2.EVENT_RBUTTONDOWN:
            drawing = True
            ix, iy = x, y
        elif event == cv2.EVENT_MOUSEMOVE:
            if drawing == True:
                cv2.rectangle(img, (ix, iy), (x, y), BLUE, -1)
                rect = (ix, iy, abs(ix - x), abs(iy - y))
        elif event == cv2.EVENT_RBUTTONUP:
            drawing = False
            cv2.rectangle(img, (ix, iy), (x, y), BLUE, -1)
            rect = (ix, iy, x, y)
            roi.extend(rect)

    cv2.namedWindow(title, cv2.WINDOW_NORMAL)
    cv2.setMouseCallback(title, onmouse)
    print("Right click and hold to draw a single rectangle ROI, beginning at the top left "
          "corner of the desired area. A blue box should appear. Hit Esc to exit the screen. "
          "The window can be resized by dragging its borders.")
    while True:
        cv2.namedWindow(title, cv2.WINDOW_NORMAL)
        cv2.imshow(title, img)
        k = cv2.waitKey(1) & 0xFF
        if k == 27:
            break
    cv2.destroyAllWindows()
    print(roi)
    return roi

def make_normal_mask(path_tis_msk, path_tumor_msk, path_save_location):
    print('==> making normal mask...')
    tis_msk = cv2.imread(path_tis_msk)
    tumor_msk = cv2.imread(path_tumor_msk)
    tumor_msk_bool = (tumor_msk == 255)
    tis_msk_after = tis_msk.copy()
    tis_msk_after[tumor_msk_bool] = 0
    print('==> saving normal mask at ' + path_save_location + ' ...')
    cv2.imwrite(path_save_location, tis_msk_after)
    ### Display result
    """
    cv2.namedWindow('tis_msk', cv2.WINDOW_NORMAL)
    cv2.namedWindow('tis_msk_after', cv2.WINDOW_NORMAL)
    cv2.namedWindow('tumor_msk', cv2.WINDOW_NORMAL)
    cv2.resizeWindow('tis_msk', 512, 512)
    cv2.resizeWindow('tis_msk_after', 512, 512)
    cv2.resizeWindow('tumor_msk', 512, 512)
    cv2.imshow('tis_msk', tis_msk)
    cv2.imshow('tis_msk_after', tis_msk_after)
    cv2.imshow('tumor_msk', tumor_msk)
    cv2.waitKey()
    cv2.destroyAllWindows()
    """

def showimages():
    cv2.namedWindow('Source Image', cv2.WINDOW_AUTOSIZE)
    cv2.namedWindow('Threshold Image', cv2.WINDOW_AUTOSIZE)
    cv2.namedWindow('Binary Image', cv2.WINDOW_AUTOSIZE)
    # cv2.namedWindow('Contour Image', cv2.WINDOW_NORMAL)
    # cv2.namedWindow('noise_remove Image', cv2.WINDOW_NORMAL)
    cv2.imshow("Source Image", src_img)
    cv2.imshow("Binary Image", bin_img)
    cv2.imshow("Threshold Image", final_thr)
    # cv2.imshow("Contour Image", final_contr)
    # cv2.imshow('noise_remove Image', noise_remove)
    # plt.show()

def im_show(name, image, resize=1):
    H, W = image.shape[0:2]
    cv2.namedWindow(name, cv2.WINDOW_NORMAL)
    cv2.imshow(name, image.astype(np.uint8))
    cv2.resizeWindow(name, round(resize * W), round(resize * H))

def stream(quit, det):
    """
    :param quit: Quit streaming
    :param det: detection object
    :return:
    """
    global sav_frame
    global sav_result
    camera_src = None
    if args.device == str(0):
        # jetson
        camera_src = CameraSrc().get_cam_src()
    else:
        # desktop
        camera_src = 0
    camera = cv2.VideoCapture(camera_src)
    assert camera.isOpened()
    if args.fullscreen:
        cv2.namedWindow(args.device, cv2.WINDOW_NORMAL)
        cv2.setWindowProperty(args.device, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
    face_locs = None
    alignedFace = None
    while True:
        _, frame = camera.read()
        sav_frame = frame.copy()
        # add frames to queue
        frame_queue.put(sav_frame)
        # display detection results
        # face_locs = face_coordinates_queue.get()
        # alignedFace = face_aligned_queue.get()
        # if face_locs is not None:
        #     print(len(alignedFace), face_locs)
        # det.display(frame=frame, face_locations=face_locs)
        cv2.imshow(args.device, frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            quit.value = 1
            break
    camera.release()
    cv2.destroyAllWindows()

# Main Process