The following are 50 code examples, extracted from open-source Python projects, that illustrate how to use cv2.imshow().
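Before the project examples, here is a minimal, self-contained sketch of the pattern they all share: load or build an image, show it in a named window with cv2.imshow(), pump the GUI event loop with cv2.waitKey(), and close the windows when done. The file name 'example.jpg' is only a placeholder.

import cv2
import numpy as np

# Load an image from disk; fall back to a synthetic gray image if the
# placeholder file does not exist.
img = cv2.imread('example.jpg')
if img is None:
    img = np.full((240, 320, 3), 128, dtype=np.uint8)

cv2.imshow('window', img)   # display the image in a window titled 'window'
cv2.waitKey(0)              # block until a key is pressed (needed so the window actually renders)
cv2.destroyAllWindows()     # close every OpenCV window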
def get_points():
    # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ...., (6,5,0)
    objp = np.zeros((6*8, 3), np.float32)
    objp[:, :2] = np.mgrid[0:8, 0:6].T.reshape(-1, 2)

    # Arrays to store object points and image points from all the images.
    objpoints = []  # 3d points in real world space
    imgpoints = []  # 2d points in image plane.

    # Make a list of calibration images
    images = glob.glob('calibration_wide/GO*.jpg')

    # Step through the list and search for chessboard corners
    for idx, fname in enumerate(images):
        img = cv2.imread(fname)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        # Find the chessboard corners
        ret, corners = cv2.findChessboardCorners(gray, (8, 6), None)

        # If found, add object points, image points
        if ret == True:
            objpoints.append(objp)
            imgpoints.append(corners)

            # Draw and display the corners
            cv2.drawChessboardCorners(img, (8, 6), corners, ret)
            #write_name = 'corners_found' + str(idx) + '.jpg'
            #cv2.imwrite(write_name, img)
            cv2.imshow('img', img)
            cv2.waitKey(500)

    cv2.destroyAllWindows()
    return objpoints, imgpoints
def skin_calib(self, raw_yrb):
    mask_skin = cv2.inRange(raw_yrb, self.mask_lower_yrb, self.mask_upper_yrb)
    cal_skin = cv2.bitwise_and(raw_yrb, raw_yrb, mask=mask_skin)
    cv2.imshow('YRB_calib', cal_skin)
    k = cv2.waitKey(5) & 0xFF
    if k == ord('s'):
        self.calib_switch = False
        cv2.destroyWindow('YRB_calib')

    ymin = cv2.getTrackbarPos('Ymin', 'YRB_calib')
    ymax = cv2.getTrackbarPos('Ymax', 'YRB_calib')
    rmin = cv2.getTrackbarPos('CRmin', 'YRB_calib')
    rmax = cv2.getTrackbarPos('CRmax', 'YRB_calib')
    bmin = cv2.getTrackbarPos('CBmin', 'YRB_calib')
    bmax = cv2.getTrackbarPos('CBmax', 'YRB_calib')
    self.mask_lower_yrb = np.array([ymin, rmin, bmin])
    self.mask_upper_yrb = np.array([ymax, rmax, bmax])

# Do skin detection with some filtering
def test_image(addr):
    target = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral']
    font = cv2.FONT_HERSHEY_SIMPLEX

    im = cv2.imread(addr)
    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(gray, scaleFactor=1.1)

    for (x, y, w, h) in faces:
        cv2.rectangle(im, (x, y), (x+w, y+h), (0, 255, 0), 2, 5)
        face_crop = im[y:y+h, x:x+w]
        face_crop = cv2.resize(face_crop, (48, 48))
        face_crop = cv2.cvtColor(face_crop, cv2.COLOR_BGR2GRAY)
        face_crop = face_crop.astype('float32') / 255
        face_crop = np.asarray(face_crop)
        face_crop = face_crop.reshape(1, 1, face_crop.shape[0], face_crop.shape[1])
        result = target[np.argmax(model.predict(face_crop))]
        cv2.putText(im, result, (x, y), font, 1, (200, 0, 0), 3, cv2.LINE_AA)

    cv2.imshow('result', im)
    cv2.imwrite('result.jpg', im)
    cv2.waitKey(0)
def run(self):
    cv2.namedWindow("display", cv2.WINDOW_NORMAL)
    cv2.setMouseCallback("display", self.opencv_calibration_node.on_mouse)
    cv2.createTrackbar("scale", "display", 0, 100, self.opencv_calibration_node.on_scale)
    if self.extra_queue:
        cv2.namedWindow("extra", cv2.WINDOW_NORMAL)
    while True:
        # wait for an image (could happen at the very beginning when the queue is still empty)
        while len(self.queue) == 0:
            time.sleep(0.1)
        im = self.queue[0]
        cv2.imshow("display", im)
        k = cv2.waitKey(6) & 0xFF
        if k in [27, ord('q')]:
            rospy.signal_shutdown('Quit')
        elif k == ord('s'):
            self.opencv_calibration_node.screendump(im)
        if self.extra_queue:
            if len(self.extra_queue):
                extra_img = self.extra_queue[0]
                cv2.imshow("extra", extra_img)
def run(im):
    im_disp = im.copy()
    window_name = "Draw line here."
    cv2.namedWindow(window_name, cv2.WINDOW_AUTOSIZE)
    cv2.moveWindow(window_name, 910, 0)

    print " Drag across the screen to set lines.\n Do it twice"
    print " After drawing the lines press 'r' to resume\n"

    l1 = np.empty((2, 2), np.uint32)
    l2 = np.empty((2, 2), np.uint32)
    list = [l1, l2]
    mouse_down = False

    def callback(event, x, y, flags, param):
        global trigger, mouse_down
        if trigger < 2:
            if event == cv2.EVENT_LBUTTONDOWN:
                mouse_down = True
                list[trigger][0] = (x, y)
            if event == cv2.EVENT_LBUTTONUP and mouse_down:
                mouse_down = False
                list[trigger][1] = (x, y)
                cv2.line(im_disp,
                         (list[trigger][0][0], list[trigger][0][1]),
                         (list[trigger][1][0], list[trigger][1][1]),
                         (255, 0, 0), 2)
                trigger += 1
        else:
            pass

    cv2.setMouseCallback(window_name, callback)
    while True:
        cv2.imshow(window_name, im_disp)
        key = cv2.waitKey(10) & 0xFF
        if key == ord('r'):  # Press 'r' to resume and return the drawn lines
            return list
    exit()
def load_image(self):
    '''Load an image file chosen by the user.'''
    try:
        im_path, _ = QFileDialog.getOpenFileName(self, 'Select an image', './',
                                                 'Image Files(*.png *.jpg *.bmp)')
        if not os.path.exists(im_path):
            return
        self.im_path = im_path
        self.statu_text.append('Loaded image: ' + self.im_path)
        if not self.swapper:
            self.swapper = Coupleswapper([self.im_path])
        elif not self.im_path == self.cur_im_path:
            self.swapper.load_heads([self.im_path])
        self.img_ori = self.swapper.heads[os.path.split(self.im_path)[-1]][0]
        cv2.imshow('Origin', self.img_ori)
    except (TooManyFaces, NoFace):
        self.statu_text.append(traceback.format_exc() +
                               '\nToo many faces or no face detected; please choose another image.')
        return
def CaptureImage():
    imageName = 'DontCare.jpg'  # Just a random string
    cap = cv2.VideoCapture(0)
    while(True):
        # Capture frame-by-frame
        ret, frame = cap.read()
        #gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # To capture the image in monochrome
        rgbImage = frame  # To capture the image in RGB color space

        # Display the resulting frame
        cv2.imshow('Webcam', rgbImage)

        # Wait for the 'q' key to be pressed before capturing
        if cv2.waitKey(1) & 0xFF == ord('q'):
            # Set the image name to the date it was captured
            imageName = str(time.strftime("%Y_%m_%d_%H_%M")) + '.jpg'
            # Save the image
            cv2.imwrite(imageName, rgbImage)
            break

    # When everything is done, release the capture
    cap.release()
    cv2.destroyAllWindows()
    # Return the captured image's name
    return imageName
def videoize(func, args, src=0, win_name="Cam", delim_wait=1, delim_key=27):
    cap = cv2.VideoCapture(src)
    while(1):
        ret, frame = cap.read()
        # To speed up processing; almost real-time on my PC
        frame = cv2.resize(frame, dsize=None, fx=0.5, fy=0.5)
        frame = cv2.flip(frame, 1)
        out = func(frame, args)
        if out is None:
            continue
        out = cv2.resize(out, dsize=None, fx=1.4, fy=1.4)
        cv2.imshow(win_name, out)
        cv2.moveWindow(win_name, (s_w - out.shape[1])/2, (s_h - out.shape[0])/2)
        k = cv2.waitKey(delim_wait)

        if k == delim_key:
            cv2.destroyAllWindows()
            cap.release()
            return
def cvCaptureVideo():
    capture = cv2.VideoCapture(0)
    if capture.isOpened() is False:
        raise IOError("IO Error")
    cv2.namedWindow("Capture", cv2.WINDOW_NORMAL)
    while True:
        ret, image = capture.read()
        if ret == False:
            continue
        cv2.imshow("Capture", image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    capture.release()
    cv2.destroyAllWindows()

# Display the webcam stream with Matplotlib
def run(self):
    print("VIDEO server starts...")
    self.sock.bind(self.ADDR)
    self.sock.listen(1)
    conn, addr = self.sock.accept()
    print("remote VIDEO client successfully connected...")
    data = "".encode("utf-8")
    payload_size = struct.calcsize("L")
    cv2.namedWindow('Remote', cv2.WINDOW_NORMAL)
    while True:
        while len(data) < payload_size:
            data += conn.recv(81920)
        packed_size = data[:payload_size]
        data = data[payload_size:]
        msg_size = struct.unpack("L", packed_size)[0]
        while len(data) < msg_size:
            data += conn.recv(81920)
        zframe_data = data[:msg_size]
        data = data[msg_size:]
        frame_data = zlib.decompress(zframe_data)
        frame = pickle.loads(frame_data)
        cv2.imshow('Remote', frame)
        if cv2.waitKey(1) & 0xFF == 27:
            break
def on_mouse(event, x, y, flags, params):
    # global img
    t = time()

    if event == cv2.EVENT_LBUTTONDOWN:
        print 'Start Mouse Position: ' + str(x) + ', ' + str(y)
        sbox = [x, y]
        boxes.append(sbox)
        # print count
        # print sbox

    elif event == cv2.EVENT_LBUTTONUP:
        print 'End Mouse Position: ' + str(x) + ', ' + str(y)
        ebox = [x, y]
        boxes.append(ebox)
        print boxes
        crop = img[boxes[-2][1]:boxes[-1][1], boxes[-2][0]:boxes[-1][0]]

        cv2.imshow('crop', crop)
        k = cv2.waitKey(0)
        if ord('r') == k:
            cv2.imwrite('Crop' + str(t) + '.jpg', crop)
            print "Written to file"
def visualize(frame, coordinates_list, alpha=0.80, color=[255, 255, 255]):
    """
    Args:
        1. frame:            OpenCV's image which has to be visualized.
        2. coordinates_list: List of coordinates which will be visualized in the given `frame`
        3. alpha, color:     Some parameters which help in visualizing properly.
                             A convex hull will be shown for each element in the `coordinates_list`
    """
    layer = frame.copy()
    output = frame.copy()

    for coordinates in coordinates_list:
        c_hull = cv2.convexHull(coordinates)
        cv2.drawContours(layer, [c_hull], -1, color, -1)

    cv2.addWeighted(layer, alpha, output, 1 - alpha, 0, output)
    cv2.imshow("Output", output)
def camera_detector(self, cap, wait=10):
    detect_timer = Timer()
    ret, _ = cap.read()

    while ret:
        ret, frame = cap.read()
        detect_timer.tic()
        result = self.detect(frame)
        detect_timer.toc()
        print('Average detecting time: {:.3f}s'.format(detect_timer.average_time))

        self.draw_result(frame, result)
        cv2.imshow('Camera', frame)
        cv2.waitKey(wait)

        ret, frame = cap.read()
def imshow_cv(label, im, block=False, text=None, wait=2):
    vis = im.copy()
    print_status(vis, text=text)
    window_manager.imshow(label, vis)
    ch = cv2.waitKey(0 if block else wait) & 0xFF
    if ch == ord(' '):
        cv2.waitKey(0)
    if ch == ord('v'):
        print('Entering debug mode, image callbacks active')
        while True:
            ch = cv2.waitKey(10) & 0xFF
            if ch == ord('q'):
                print('Exiting debug mode!')
                break
            if ch == ord('s'):
                fn = 'img-%s.png' % time.strftime("%Y-%m-%d-%H-%M-%S")
                print('Saving %s' % fn)
                cv2.imwrite(fn, vis)
    elif ch == 27 or ch == ord('q'):
        sys.exit(1)
def _render(self):
    self._smoothed_img = cv2.GaussianBlur(self.image,
                                          (self._filter_size, self._filter_size),
                                          sigmaX=0, sigmaY=0)
    self._edge_img = cv2.Canny(self._smoothed_img, self._threshold1, self._threshold2)
    cv2.imshow('smoothed', self._smoothed_img)
    cv2.imshow('edges', self._edge_img)
def on_new_point_cloud(data):
    global im
    pc = pc2.read_points(data, skip_nans=True, field_names=("x", "y", "z", "intensity"))
    #print pc.type
    #print data.type
    cloud_points = []
    for p in pc:
        cloud_points.append(p)
    npc = np.array(cloud_points)
    #lidar_to_2d_front_view(npc, v_res=VRES, h_res=HRES, v_fov=VFOV, val="depth", y_fudge=Y_FUDGE)
    #lidar_to_2d_front_view(npc, v_res=VRES, h_res=HRES, v_fov=VFOV, val="height", y_fudge=Y_FUDGE)
    #lidar_to_2d_front_view(npc, v_res=VRES, h_res=HRES, v_fov=VFOV, val="reflectance", y_fudge=Y_FUDGE)
    #im = birds_eye_point_cloud(npc, side_range=(-10, 10), fwd_range=(-10, 10), res=0.1)
    im = point_cloud_to_panorama(npc, v_res=VRES, h_res=HRES, v_fov=VFOV,
                                 y_fudge=5, d_range=(0, 100))
    #plt.imshow(im, cmap='spectral')
    #plt.show()
def camera_callback(self, msg):
    try:
        self.camera_data = self.cv_bridge.imgmsg_to_cv2(msg, "bgr8")
    except cv_bridge.CvBridgeError:
        return

    gray = cv2.cvtColor(self.camera_data, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (5, 5), 0)
    canny = cv2.Canny(blur, 30, 150)

    cv2.imshow("Robot Camera", canny)
    cv2.waitKey(1)
def bench(folder):
    from os.path import join
    from video_capture.av_file_capture import File_Capture
    cap = File_Capture(join(folder, 'marker-test.mp4'))
    markers = []
    detected_count = 0
    for x in range(500):
        frame = cap.get_frame()
        img = frame.img
        gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        markers = detect_markers_robust(gray_img, 5, prev_markers=markers,
                                        true_detect_every_frame=1, visualize=True)
        draw_markers(img, markers)
        cv2.imshow('Detected Markers', img)

        # for m in markers:
        #     if 'img' in m:
        #         cv2.imshow('id %s' % m['id'], m['img'])
        #         cv2.imshow('otsu %s' % m['id'], m['otsu'])
        if cv2.waitKey(1) == 27:
            break

        detected_count += len(markers)
    print(detected_count)  #2900 #3042 #3021
def find_contour(self, img_src, Rxmin, Rymin, Rxmax, Rymax):
    cv2.rectangle(img_src, (Rxmax, Rymax), (Rxmin, Rymin), (0, 255, 0), 0)
    crop_res = img_src[Rymin:Rymax, Rxmin:Rxmax]
    grey = cv2.cvtColor(crop_res, cv2.COLOR_BGR2GRAY)

    _, thresh1 = cv2.threshold(grey, 127, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    cv2.imshow('Thresh', thresh1)

    contours, hierchy = cv2.findContours(thresh1.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)

    # draw contours on the thresholded image
    if len(contours) > 0:
        cv2.drawContours(thresh1, contours, -1, (0, 255, 0), 3)

    return contours, crop_res

# Check ConvexHull and Convexity Defects
def color_quant(input, K, output):
    img = cv2.imread(input)
    Z = img.reshape((-1, 3))

    # convert to np.float32
    Z = np.float32(Z)

    # define criteria, number of clusters (K) and apply kmeans()
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 15, 1.0)
    ret, label, center = cv2.kmeans(Z, K, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)

    # Now convert back into uint8, and rebuild the original image
    center = np.uint8(center)
    res = center[label.flatten()]
    res2 = res.reshape((img.shape))
    cv2.imshow('res2', res2)
    cv2.waitKey(0)
    cv2.imwrite(output, res2)
    cv2.destroyAllWindows()
def animpingpong(self):
    obj = self.Object
    img = None
    if not obj.imageFromNode:
        img = cv2.imread(obj.imageFile)
    else:
        print "copy image ..."
        img = obj.imageNode.ViewObject.Proxy.img.copy()
        print "copied"

    print "loaded"
    # print (obj.blockSize, obj.ksize, obj.k)
    # edges = cv2.Canny(img, obj.minVal, obj.maxVal)
    # color = cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB)
    # edges = color

    # kernel is required below, so it is no longer commented out
    kernel = np.ones((obj.xsize, obj.ysize), np.uint8)
    opening = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel, iterations=obj.iterations)

    if True:
        print "showing"
        cv2.imshow(obj.Label, opening)
        print "shown"
    else:
        from matplotlib import pyplot as plt
        plt.subplot(121), plt.imshow(img, cmap='gray')
        plt.title('Edge Image'), plt.xticks([]), plt.yticks([])
        plt.subplot(122), plt.imshow(dst, cmap='gray')
        plt.title('Corner Image'), plt.xticks([]), plt.yticks([])
        plt.show()

    print "done"
    self.img = opening
def animpingpong(self):
    obj = self.Object
    img = None
    if not obj.imageFromNode:
        img = cv2.imread(obj.imageFile)
    else:
        print "copy image ..."
        img = obj.imageNode.ViewObject.Proxy.img.copy()
        print "copied"

    print "loaded"
    # print (obj.blockSize, obj.ksize, obj.k)

    edges = cv2.Canny(img, obj.minVal, obj.maxVal)
    color = cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB)
    edges = color

    if True:
        print "showing"
        cv2.imshow(obj.Label, edges)
        print "shown"
    else:
        from matplotlib import pyplot as plt
        plt.subplot(121), plt.imshow(img, cmap='gray')
        plt.title('Edge Image'), plt.xticks([]), plt.yticks([])
        plt.subplot(122), plt.imshow(dst, cmap='gray')
        plt.title('Corner Image'), plt.xticks([]), plt.yticks([])
        plt.show()

    print "done"
    self.img = edges
def animpingpong(self):
    obj = self.Object
    img = None
    if not obj.imageFromNode:
        img = cv2.imread(obj.imageFile)
    else:
        print "copy image ..."
        img = obj.imageNode.ViewObject.Proxy.img.copy()
        print "copied"

    print "loaded"
    # print (obj.blockSize, obj.ksize, obj.k)
    # edges = cv2.Canny(img, obj.minVal, obj.maxVal)
    # color = cv2.cvtColor(edges, cv2.COLOR_GRAY2RGB)
    # edges = color

    # kernel is required below, so it is no longer commented out
    kernel = np.ones((obj.xsize, obj.ysize), np.uint8)
    closing = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel, iterations=obj.iterations)

    if True:
        print "showing"
        cv2.imshow(obj.Label, closing)
        print "shown"
    else:
        from matplotlib import pyplot as plt
        plt.subplot(121), plt.imshow(img, cmap='gray')
        plt.title('Edge Image'), plt.xticks([]), plt.yticks([])
        plt.subplot(122), plt.imshow(dst, cmap='gray')
        plt.title('Corner Image'), plt.xticks([]), plt.yticks([])
        plt.show()

    print "done"
    self.img = closing
def findpathlist(ed, showPics=True):
    '''generate list of paths'''
    pathlist = []
    w, l = ed.shape
    for x in range(l):
        for y in range(w):
            if ed[y][x]:
                path = runpath(ed, x, y)
                if len(path) > 4:
                    if showPics:
                        cv2.imshow('remaining points', ed)
                        cv2.waitKey(1)
                        Gui.updateGui()
                    pathlist.append(path)
    return pathlist
def draw_images(img, undistorted, title, cmap):
    f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
    f.tight_layout()
    ax1.imshow(img)
    ax1.set_title('Original Image', fontsize=50)
    if cmap is not None:
        ax2.imshow(undistorted, cmap=cmap)
    else:
        ax2.imshow(undistorted)
    ax2.set_title(title, fontsize=50)
    plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
    plt.show()

# TODO: Write a function that takes an image, object points, and image points,
# performs the camera calibration and image distortion correction, and
# returns the undistorted image
def apply_motion_blur(image, kernel_size, strength=1.0):
    """Applies motion blur on image"""
    # generating the kernel
    kernel_motion_blur = np.zeros((kernel_size, kernel_size))
    kernel_motion_blur[int((kernel_size - 1) / 2), :] = np.ones(kernel_size)
    kernel_motion_blur = kernel_motion_blur / kernel_size

    rotation_kernel = np.random.uniform(0, 360)
    kernel_motion_blur = rotate(kernel_motion_blur, rotation_kernel)
    #cv2.imshow("kernel", cv2.resize(kernel_motion_blur, (100, 100)))
    kernel_motion_blur *= strength

    # applying the kernel to the input image
    output = cv2.filter2D(image, -1, kernel_motion_blur)
    return output
def vis_detections(im, class_name, dets, thresh=0.5):
    """Draw detected bounding boxes."""
    inds = np.where(dets[:, -1] >= thresh)[0]
    if len(inds) == 0:
        return

    for i in inds:
        bbox = dets[i, :4]
        score = dets[i, -1]

        # Create rectangle and text using OpenCV
        #print ('ClassName:', class_name, 'bbox:', bbox, 'score:', score)

        # Draw the rectangle
        cv2.rectangle(im, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (0, 255, 0), 3)
        # Draw the text
        cv2.putText(im, class_name + ' ' + str(score), (bbox[0], bbox[1]),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2, cv2.LINE_AA)

    # Show the image
    #cv2.imshow("Detect Result", im)
def hdSolidBlock(fn="redHDSolidBlock.jpg", bgr=None):
    '''Generate test images as solid blocks of colour of known size, save to filename fn.'''
    # Create a zero (black) image of HD size with 3 colour dimensions. Colour space assumed BGR by default.
    h = 1080
    w = 1920
    img = np.zeros((h, w, 3), dtype="uint8")
    # Set all of the pixels to the bgr tuple (default red, 8-bit colour)
    if not bgr:
        bgr = [0, 0, 255]
    img[:, :] = bgr
    vw = ImageViewer(img)
    vw.windowShow()
    #cv2.imshow("zeroes", frame)
    #ch = 0xff & cv2.waitKey(10000)
    #cv2.destroyAllWindows()
    cv2.imwrite(fn, img)
def do_warp(M, warp):
    warp = cv2.warpPerspective(orig, M, (maxWidth, maxHeight))

    # convert the warped image to grayscale and then adjust
    # the intensity of the pixels to have minimum and maximum
    # values of 0 and 255, respectively
    warp = cv2.cvtColor(warp, cv2.COLOR_BGR2GRAY)
    warp = exposure.rescale_intensity(warp, out_range=(0, 255))

    # the pokemon we want to identify will be in the top-right
    # corner of the warped image -- let's crop this region out
    (h, w) = warp.shape
    (dX, dY) = (int(w * 0.4), int(h * 0.45))
    crop = warp[10:dY, w - dX:w - 10]

    # save the cropped image to file
    cv2.imwrite("cropped.png", crop)

    # show our images
    cv2.imshow("image", image)
    cv2.imshow("edge", edged)
    cv2.imshow("warp", imutils.resize(warp, height=300))
    cv2.imshow("crop", imutils.resize(crop, height=300))
    cv2.waitKey(0)
def draw(self, image):
    if len(self.tilesByOrder) == 0:
        cv2.imshow("image", image)
    for tile in self.tilesByOrder:
        cv2.rectangle(image, (tile.wx, tile.wy),
                      (tile.wx + tile.w, tile.wy + tile.h), (0, 255, 0), 1)
        # Left bezel
        cv2.rectangle(image, (tile.wx - tile.l, tile.wy),
                      (tile.wx, tile.wy + tile.h), (40, 255, 40), -1)
        # Top bezel
        cv2.rectangle(image, (tile.wx - tile.l, tile.wy - tile.t),
                      (tile.wx + tile.w, tile.wy), (40, 255, 40), -1)
        # Right bezel
        cv2.rectangle(image, (tile.wx + tile.w, tile.wy - tile.t),
                      (tile.wx + tile.w + tile.r, tile.wy + tile.h), (40, 255, 40), -1)
        # Bottom bezel
        cv2.rectangle(image, (tile.wx - tile.l, tile.wy + tile.h),
                      (tile.wx + tile.w + tile.r, tile.wy + tile.h + tile.b), (40, 255, 40), -1)
    cv2.imshow("image", image)
def rotating_example():
    img = cv2.imread('./data/hi.jpg')
    vwriter = VideoWriterRGB('hi-video.avi')
    frames = 0
    for angle in range(0, 360, 5):
        rot = imutils.rotate(img, angle=angle)
        cv2.imshow("Angle = %d" % (angle), rot)
        vwriter.addFrame(rot)
        frames += 1
    for angle in range(360, 0, -5):
        rot = imutils.rotate(img, angle=angle)
        cv2.imshow("Angle = %d" % (angle), rot)
        vwriter.addFrame(rot)
        frames += 1
    vwriter.finalise()
    print("Created movie with %d frames" % frames)
def show_cut_img(img_name):
    img = cv2.imread(img_name, 0)
    cut_img = cut(img)

    cv2.imshow('cut image', cut_img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

    return cut_img
def dispact_and_update(img, hack, base_im, x, y, w, h):
    try:
        myurl = "http://facejack.westeurope.cloudapp.azure.com:5001/imsend"
        headers = {
            'content-type': "application/x-www-form-urlencoded",
            'cache-control': "no-cache"
        }
        r = requests.post(url=myurl, data=img, headers=headers,
                          params={'hack': str(hack)}).json()
        reply = 'authentication' in r and r['authentication'] == "ALLOWED"
        disp_face = cv2.resize(base_im[y:y + h, x:x + w], (224, 224), 0, 0, cv2.INTER_LANCZOS4)
        if reply:
            cv2.rectangle(disp_face, (0, 0), (222, 222), (0, 255, 0), 2)
        else:
            cv2.rectangle(disp_face, (0, 0), (222, 222), (0, 0, 255), 2)
        cv2.imshow("Face", disp_face)
    finally:
        myl.release()
def generalBlur(srcpath, dstpath):
    img = cv2.imread(srcpath, 0)  # read the image in grayscale
    img1 = np.float32(img)  # convert to float
    kernel = np.ones((5, 5), np.float32) / 25

    dst = cv2.filter2D(img1, -1, kernel)
    # cv2.filter2D(src, dst, kernel, anchor=(-1, -1))
    # ddepth=-1 means the output has the same depth as the source

    plt.figure()
    plt.subplot(1, 2, 1), plt.imshow(img1, 'gray')
    # plt.savefig('test1.jpg')
    plt.subplot(1, 2, 2), plt.imshow(dst, 'gray')
    # plt.savefig('test2.jpg')
    plt.show()
def click_and_crop(event, x, y, flags, param):
    global bbs, x_upper, id
    if event == cv2.EVENT_LBUTTONDOWN:
        if x_upper:
            bbs.append([x, y, 0, 0, 0, 0, 0, 0])
        else:
            bbs[-1][4] = x
            bbs[-1][5] = y
    elif event == cv2.EVENT_LBUTTONUP:
        if x_upper:
            bbs[-1][2] = abs(x - bbs[-1][0])
            bbs[-1][3] = abs(y - bbs[-1][1])
            bbs[-1][0] = min(x, bbs[-1][0])
            bbs[-1][1] = min(y, bbs[-1][1])
            cv2.rectangle(image, (bbs[-1][0], bbs[-1][1]),
                          (bbs[-1][0]+bbs[-1][2], bbs[-1][1]+bbs[-1][3]), (0, 0, 255), 2)
            #cv2.putText(image, 'Upper %d' % id, (bbs[-1][0], bbs[-1][1]),
            #            cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255))
        else:
            bbs[-1][6] = abs(x - bbs[-1][4])
            bbs[-1][7] = abs(y - bbs[-1][5])
            bbs[-1][4] = min(x, bbs[-1][4])
            bbs[-1][5] = min(y, bbs[-1][5])
            cv2.rectangle(image, (bbs[-1][4], bbs[-1][5]),
                          (bbs[-1][4]+bbs[-1][6], bbs[-1][5]+bbs[-1][7]), (0, 255, 0), 2)
            cv2.putText(image, 'Body %d' % id, (bbs[-1][4], bbs[-1][5]),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 255, 0))
        cv2.imshow("image", image)
        x_upper = not x_upper
def show(im, allobj, S, w, h, cellx, celly):
    for obj in allobj:
        a = obj[5] % S
        b = obj[5] // S
        cx = a + obj[1]
        cy = b + obj[2]
        centerx = cx * cellx
        centery = cy * celly
        ww = obj[3]**2 * w
        hh = obj[4]**2 * h
        cv2.rectangle(im,
                      (int(centerx - ww/2), int(centery - hh/2)),
                      (int(centerx + ww/2), int(centery + hh/2)),
                      (0, 0, 255), 2)
    cv2.imshow("result", im)
    cv2.waitKey()
    cv2.destroyAllWindows()
def next_batch(self):
    self.count += 1
    # print self.count
    start = self.index_in_epoch
    self.index_in_epoch += batch_size / pairs_per_img
    if self.index_in_epoch > self.number:
        self.index_in_epoch = 0
        start = self.index_in_epoch
        self.index_in_epoch += batch_size / pairs_per_img
    end = self.index_in_epoch
    data_batch, label_batch = generate_data(self.img_path_list[start])
    for i in range(start+1, end):
        data, label = generate_data(self.img_path_list[i])  # [4, 2, 128, 128], [4, 1, 8]
        data_batch = np.concatenate((data_batch, data))      # [64, 2, 128, 128]
        label_batch = np.concatenate((label_batch, label))   # [64, 1, 8]
    data_batch = np.array(data_batch).transpose([0, 2, 3, 1])  # (64, 128, 128, 2)
    # cv2.imshow('window2', data_batch[1, :, :, 1].squeeze())
    # cv2.waitKey()
    label_batch = np.array(label_batch).squeeze()  # (64, 1, 8)
    return data_batch, label_batch
def next_batch(self):
    start = self.index_in_epoch
    self.index_in_epoch += batch_size / pairs_per_img
    if self.index_in_epoch > self.number:
        self.index_in_epoch = 0
        start = self.index_in_epoch
        self.index_in_epoch += batch_size / pairs_per_img
    end = self.index_in_epoch
    data_batch, label_batch = generate_data(self.img_path_list[start])
    for i in range(start+1, end):
        data, label = generate_data(self.img_path_list[i])
        data_batch = np.concatenate((data_batch, data))
        label_batch = np.concatenate((label_batch, label))
    data_batch = np.array(data_batch).transpose([0, 2, 3, 1])
    # cv2.imshow('window2', data_batch[1, :, :, 1].squeeze())
    # cv2.waitKey()
    label_batch = np.array(label_batch).squeeze()
    return data_batch, label_batch
def make_mouse_callback(imgs, ref_pt):
    # initialize the list of reference points and boolean indicating
    # whether cropping is being performed or not
    cropping = [False]
    clone = imgs[0]

    def _click_and_crop(event, x, y, flags, param):
        # grab references to the global variables
        # global ref_pt, cropping

        # if the left mouse button was clicked, record the starting
        # (x, y) coordinates and indicate that cropping is being performed
        if event == cv2.EVENT_LBUTTONDOWN:
            ref_pt[0] = (x, y)
            cropping[0] = True

        # check to see if the left mouse button was released
        elif event == cv2.EVENT_LBUTTONUP:
            # record the ending (x, y) coordinates and indicate that
            # the cropping operation is finished
            ref_pt[1] = (x, y)
            cropping[0] = False

            # draw a rectangle around the region of interest
            imgs[1] = image = clone.copy()
            cv2.rectangle(image, ref_pt[0], ref_pt[1], (0, 255, 0), 2)
            cv2.imshow("image", image)
        elif event == cv2.EVENT_MOUSEMOVE and cropping[0]:
            img2 = clone.copy()
            cv2.rectangle(img2, ref_pt[0], (x, y), (0, 255, 0), 2)
            imgs[1] = image = img2
            cv2.imshow("image", image)

    return _click_and_crop
def test_minicap():
    from atx.drivers.android_minicap import AndroidDeviceMinicap
    cv2.namedWindow("preview")
    d = AndroidDeviceMinicap()

    while True:
        try:
            h, w = d._screen.shape[:2]
            img = cv2.resize(d._screen, (w/2, h/2))
            cv2.imshow('preview', img)
            key = cv2.waitKey(1)
            if key == 100:  # 'd' for dump
                filename = time.strftime('%Y%m%d%H%M%S.png')
                cv2.imwrite(filename, d._screen)
        except KeyboardInterrupt:
            break
    cv2.destroyWindow('preview')
def test_grid():
    m = StupidMonkey({'touch': 10})
    poss = []
    while True:
        pos = m.get_touch_point()
        if not pos:
            break
        poss.append(pos)
    print 'grid point count:', len(poss)

    import cv2
    import numpy
    img = numpy.zeros((1920, 1080))
    for x, y in poss:
        img[x, y] = 255
    img = cv2.resize(img, (540, 960))
    cv2.imshow('grid', img)
    cv2.waitKey()
def draw_boxes(im, bboxes, is_display=True, color=None, caption="Image", wait=True):
    """
    boxes: bounding boxes
    """
    im = im.copy()
    for box in bboxes:
        if color == None:
            if len(box) == 5 or len(box) == 9:
                c = tuple(cm.jet([box[-1]])[0, 2::-1] * 255)
            else:
                c = tuple(np.random.randint(0, 256, 3))
        else:
            c = color
        cv2.rectangle(im, tuple(box[:2]), tuple(box[2:4]), c)
    if is_display:
        cv2.imshow(caption, im)
        if wait:
            cv2.waitKey(0)
    return im
def markPoint(event, x, y, flags, param):
    global idx
    global data
    global input

    if event == cv2.EVENT_LBUTTONUP:
        data.append((x, y))
        cv2.circle(input, (x, y), 3, (0, 0, 255), 2)
        cv2.putText(input, str(idx), (x, y+4), cv2.FONT_HERSHEY_SIMPLEX,
                    0.4, (0, 0, 0), 2, cv2.LINE_AA)
        cv2.imshow("Mark points", input)
        idx = idx + 1
def visualize_image(image, name="Image", resize=False, save_image=False, path=None):
    """Helper function to visualize and save any image"""
    image = image.reshape([IMAGE_WIDTH, IMAGE_HEIGHT])
    image = image.astype(np.uint8)
    if resize:
        image = cv2.resize(image, (IMAGE_WIDTH * 10, IMAGE_HEIGHT * 10))
    cv2.imshow(name, image)
    if cv2.waitKey(0) & 0xFF == ord('q'):
        cv2.destroyAllWindows()
    if save_image:
        assert path is not None
        cv2.imwrite(path, image)
def image_preview(image):
    cv2.imshow('Image preview', image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def display_solution(square_borders, start_grid, solution, image):
    """ Writes the solution to an image and displays said image.
    Params:
        square_borders -- A list containing the borders of all squares
        start_grid     -- A list containing the sudoku starting values
        solution       -- A list containing the sudoku solution
        image          -- The image to write to """
    cur_row = 0
    cur_col = 0
    for i, b in enumerate(square_borders):
        x, y, x2, y2 = b  # Tuple unpacking
        # Calculate bottom-left position for text
        text_x, text_y = ((x2+x) / 2) - 10, ((y2+y) / 2) + 10
        # Bottom-left corner for text position
        org = (text_x, text_y)
        # Only write text if the position was not set in the start_grid
        if start_grid[cur_row][cur_col] == 0:  # '== 0' rather than 'is 0': identity checks on ints are unreliable
            value = str(solution[cur_row][cur_col])
            cv2.putText(
                img=image,
                text=value,
                org=org,
                fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                fontScale=1,
                color=(0, 255, 0),
                thickness=2)
        cur_col += 1
        if cur_col % 9 == 0:
            cur_row += 1
            cur_col = 0

    cv2.imshow('Solution', image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def process_image(image):
    array_image = numpy.array(image)
    array_image = cv2.resize(array_image, (0, 0), fx=2, fy=2)
    #cv2.imshow('image', array_image)
    #cv2.waitKey(0)
    image = PIL.Image.fromarray(array_image)
    return image
def test(self):
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        self.saver_z.restore(sess, self.encode_z_model)
        self.saver_y.restore(sess, self.encode_y_model)

        realbatch_array, _ = MnistData.getNextBatch(self.ds_train, self.label_y, 0, 50,
                                                    self.batch_size)
        output_image, label_y = sess.run([self.fake_images, self.e_y],
                                         feed_dict={self.images: realbatch_array})

        # one-hot
        # label_y = tf.arg_max(label_y, 1)
        print label_y

        save_images(output_image, [8, 8],
                    './{}/test{:02d}_{:04d}.png'.format(self.sample_path, 0, 0))
        save_images(realbatch_array, [8, 8],
                    './{}/test{:02d}_{:04d}_r.png'.format(self.sample_path, 0, 0))

        gen_img = cv2.imread('./{}/test{:02d}_{:04d}.png'.format(self.sample_path, 0, 0), 0)
        real_img = cv2.imread('./{}/test{:02d}_{:04d}_r.png'.format(self.sample_path, 0, 0), 0)

        cv2.imshow("test_EGan", gen_img)
        cv2.imshow("Real_Image", real_img)

        cv2.waitKey(-1)
        print("Test finish!")
def image_detector(self, imname, wait=0):
    detect_timer = Timer()
    image = cv2.imread(imname)

    detect_timer.tic()
    result = self.detect(image)
    detect_timer.toc()
    print('Average detecting time: {:.3f}s'.format(detect_timer.average_time))

    self.draw_result(image, result)
    cv2.imshow('Image', image)
    cv2.waitKey(wait)