The following 10 code examples, extracted from open-source Python projects, illustrate how to use cv2.moveWindow().
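For orientation, here is a minimal, self-contained sketch (not taken from any of the projects below; the window name "demo", the dummy image, and the coordinates are arbitrary placeholders): cv2.moveWindow(winname, x, y) places the top-left corner of an existing window at the given screen position, in pixels.

import cv2
import numpy as np

# Create a dummy image and a named window, then position the window
# so its top-left corner sits at screen coordinates (200, 100).
img = np.zeros((240, 320, 3), dtype=np.uint8)
cv2.namedWindow("demo", cv2.WINDOW_AUTOSIZE)
cv2.imshow("demo", img)
cv2.moveWindow("demo", 200, 100)  # x offset first, then y offset
cv2.waitKey(0)
cv2.destroyAllWindows()

The examples that follow show the same call in real projects: placing GUI windows side by side, pinning a fullscreen capture window at (0, 0), or tiling a grid of preview windows.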
Example 1

def run(im):
    im_disp = im.copy()
    window_name = "Draw line here."
    cv2.namedWindow(window_name, cv2.WINDOW_AUTOSIZE)
    cv2.moveWindow(window_name, 910, 0)

    print " Drag across the screen to set lines.\n Do it twice"
    print " After drawing the lines press 'r' to resume\n"

    l1 = np.empty((2, 2), np.uint32)
    l2 = np.empty((2, 2), np.uint32)
    list = [l1, l2]

    mouse_down = False

    def callback(event, x, y, flags, param):
        global trigger, mouse_down
        if trigger < 2:
            if event == cv2.EVENT_LBUTTONDOWN:
                mouse_down = True
                list[trigger][0] = (x, y)
            if event == cv2.EVENT_LBUTTONUP and mouse_down:
                mouse_down = False
                list[trigger][1] = (x, y)
                cv2.line(im_disp,
                         (list[trigger][0][0], list[trigger][0][1]),
                         (list[trigger][1][0], list[trigger][1][1]),
                         (255, 0, 0), 2)
                trigger += 1
        else:
            pass

    cv2.setMouseCallback(window_name, callback)
    while True:
        cv2.imshow(window_name, im_disp)
        key = cv2.waitKey(10) & 0xFF
        if key == ord('r'):  # press 'r' to resume and return the drawn lines
            return list
    exit()
Example 2

def get_start_points(image):
    window = cv2.namedWindow(MAZE_NAME, cv2.WINDOW_NORMAL)
    cv2.resizeWindow(MAZE_NAME, 900, 900)
    cv2.imshow(MAZE_NAME, image)
    cv2.moveWindow(MAZE_NAME, 100, 100)
    print("Please press 'A' to use default start and end points, or press 'S' to choose your own")
    while True:
        key = cv2.waitKey(0)
        if key == ord('a'):
            print("Using Default Start and End Points")
            imageProcessor = ImageProcessor(image)
            start_x, start_y = imageProcessor.getDefaultStart(image)
            end_x, end_y = imageProcessor.getDefaultEnd(image)
            print("Start Point: {0}, End Point: {1}".format((start_x, start_y), (end_x, end_y)))
            break
        elif key == ord('s'):
            print("Please select a start point")
            start_x, start_y = get_user_selected_point(image)
            print("Start Point: {0}, please select an end point".format((start_x, start_y)))
            end_x, end_y = get_user_selected_point(image)
            print("End Point: {0}".format((end_x, end_y)))
            break
        else:
            print("Invalid")
            continue
    cv2.destroyAllWindows()
    return start_x, start_y, end_x, end_y
Example 3

def videoize(func, args, src=0, win_name="Cam", delim_wait=1, delim_key=27):
    cap = cv2.VideoCapture(src)
    while True:
        ret, frame = cap.read()
        # To speed up processing; almost real-time on my PC
        frame = cv2.resize(frame, dsize=None, fx=0.5, fy=0.5)
        frame = cv2.flip(frame, 1)
        out = func(frame, args)
        if out is None:
            continue
        out = cv2.resize(out, dsize=None, fx=1.4, fy=1.4)
        cv2.imshow(win_name, out)
        # center the window on an s_w x s_h screen; integer division keeps the offsets ints
        cv2.moveWindow(win_name, (s_w - out.shape[1]) // 2, (s_h - out.shape[0]) // 2)
        k = cv2.waitKey(delim_wait)
        if k == delim_key:
            cv2.destroyAllWindows()
            cap.release()
            return
Example 4

def __init__(self):
    self.node_name = "hand_gestures"
    rospy.init_node(self.node_name)
    rospy.on_shutdown(self.cleanup)
    # self.cv_window_name = self.node_name
    # cv2.namedWindow("Depth Image", 1)
    # cv2.moveWindow("Depth Image", 20, 350)
    self.bridge = CvBridge()
    self.numFingers = RecognizeNumFingers()
    self.depth_sub = rospy.Subscriber("/asus/depth/image_raw", Image, self.depth_callback)
    self.num_pub = rospy.Publisher('num_fingers', Int32, queue_size=10, latch=True)
    # self.img_pub = rospy.Publisher('hand_img', Image, queue_size=10)
    rospy.loginfo("Waiting for image topics...")
Example 5

def setupWindow():
    filename = getUserSelectedImage()
    imageProcessor = ImageProcessor(cv2.imread(filename, 0))
    colourImage = cv2.imread(filename, 1)
    image = imageProcessor.getThresholdedImage(False)
    granularity = imageProcessor.get_granularity(image, 100)
    print("Granularity: {0}".format(granularity))
    start_x, start_y, end_x, end_y = get_start_points(image)
    image = imageProcessor.encloseMaze(image)
    mazerunner = MazeSolver.MazeSolver(image, granularity)
    solution = mazerunner.solveMaze(start_x, start_y, end_x, end_y)
    if not solution:
        cv2.imshow(MAZE_NAME, image)
    else:
        solvedImage = draw_solution(solution, colourImage)
        solvedImage = imageProcessor.mark_point((end_x, end_y), 3, (255, 0, 0), solvedImage)
        solvedImage = imageProcessor.mark_point((start_x, start_y), 3, (255, 0, 0), solvedImage)
        window = cv2.namedWindow("Solved Image", cv2.WINDOW_NORMAL)
        cv2.resizeWindow("Solved Image", 900, 900)
        cv2.moveWindow("Solved Image", 100, 100)
        cv2.imshow("Solved Image", solvedImage)
    print("Press any key to exit")
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Example 6

def imshow(self, label, im):
    cv2.imshow(label, im)
    if label not in self.has_moved_:
        cv2.moveWindow(label, 1920, 0)
        self.has_moved_.add(label)
Example 7

def __init__(self, fn):
    self.img = cv2.imread(fn)
    if self.img is None:
        raise Exception('Failed to load image file: %s' % fn)
    h, w = self.img.shape[:2]
    self.markers = np.zeros((h, w), np.int32)
    self.markers_vis = self.img.copy()
    self.cur_marker = 1
    self.colors = np.int32(list(np.ndindex(2, 2, 2))) * 255
    self.auto_update = True
    cv2.namedWindow('img', cv2.WINDOW_NORMAL)
    cv2.moveWindow('img', 300, 200)
    self.sketch = Sketcher('img', [self.markers_vis, self.markers], self.get_colors)
    self.returnVar = self.markers.copy()
Example 8

def watershed(self):
    m = self.markers.copy()
    cv2.watershed(self.img, m)
    self.returnVar = m.copy()
    overlay = self.colors[np.maximum(m, 0)]
    vis = cv2.addWeighted(self.img, 0.5, overlay, 0.5, 0.0, dtype=cv2.CV_8UC3)
    cv2.namedWindow('watershed', cv2.WINDOW_NORMAL)
    cv2.moveWindow('watershed', 780, 200)
    cv2.imshow('watershed', vis)
Example 9

def fullScreenCatch():
    global img, imgSmall, newGray, finishDraw
    base = ImageGrab.grab()
    base.save('fullScreen.png')
    img = cv2.imread("fullScreen.png")
    cv2.namedWindow("image", cv2.WND_PROP_FULLSCREEN)
    cv2.moveWindow('image', 0, 0)  # pin the window at the top-left corner of the screen
    cv2.setWindowProperty("image", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
    cv2.setMouseCallback('image', draw_rect)
    imgSmall = img.copy()  # working copy for the rectangle selection
    imgGray = img.copy()   # copy to be converted to grayscale
    imgGray = cv2.cvtColor(imgGray, cv2.COLOR_BGR2GRAY)
    cv2.imwrite("fullGray.png", imgGray)  # save and reread (cv2.imread returns a 3-channel image by default)
    newGray = cv2.imread("fullGray.png")
    finishDraw = False
    while True:
        cv2.imshow('image', img)
        k = cv2.waitKey(2) & 0xFF
        if k & finishDraw:
            catchScreen(x1, y1, x2, y2)
            # img2word(img)
            # chinese_word = eng2chinese(word)
            chinese_word = 'waiting...'  # show word when drawing stops
            font = cv2.FONT_HERSHEY_SIMPLEX
            cv2.putText(img, chinese_word, (x1, y2 + 24), font, 0.8, (0, 0, 255), 1)
            cv2.imshow('image', img)
            cv2.waitKey()
            break
    return
Example 10

def show(src, norms, predictsloop, t, bottom=None, right=None):
    t = t % len(predictsloop)
    w = 64
    h = 64
    xscreenbase = 0
    yscreenbase = 0
    for i in range(num_batchsize):
        # one row of 64x64 tiles per sample: the input channels first...
        for j in range(len(src)):
            imshow64x64("sample-" + str(i) + "-" + str(j),
                        norms[j](src[j][i, 0:3, :, :].transpose(1, 2, 0)))
            cv2.moveWindow("sample-" + str(i) + "-" + str(j),
                           xscreenbase + j * w, yscreenbase + i * h)
        n = j + 1
        # ...then the current prediction, placed in the next column of the same row
        for p in range(1):
            j = p + n
            imshow64x64("sample-" + str(i) + "-" + str(j),
                        imnorm(predictsloop[t][p][i, 0:3].transpose(2, 1, 0)))
            cv2.moveWindow("sample-" + str(i) + "-" + str(j),
                           xscreenbase + j * w, yscreenbase + i * h)
        n += 4
        if i >= 7:
            break
    if bottom is not None:
        cv2.imshow('bottom', bottom)
        cv2.moveWindow("bottom", 0, 64 * 8)
    if right is not None:
        cv2.imshow('right', right)
        cv2.moveWindow("right", 64 * 10, 0)