The following 24 code examples, extracted from Python open-source projects, illustrate how to use cv2.findChessboardCorners().
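Before the examples, here is the typical call pattern: convert the image to grayscale, ask cv2.findChessboardCorners() for the inner-corner grid of a given size, and, if the board was found, optionally refine the corners with cv2.cornerSubPix() and visualize them with cv2.drawChessboardCorners(). The sketch below is only an orientation; the image path and pattern size are placeholders, not taken from any of the projects quoted here.

import cv2

# Placeholder input; substitute your own calibration image and board geometry.
img = cv2.imread('calibration.jpg')
pattern_size = (9, 6)  # inner corners per row and per column

gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
found, corners = cv2.findChessboardCorners(gray, pattern_size, None)

if found:
    # Optional sub-pixel refinement of the detected corner locations.
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
    cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
    # Draw the detected grid for visual inspection.
    cv2.drawChessboardCorners(img, pattern_size, corners, found)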
def try_approximate_corners(self, board_dims):
    found, corners = cv2.findChessboardCorners(self.frame, board_dims)
    self.current_image_points = corners
    self.current_board_dims = board_dims
    return found

def get_points():
    # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
    objp = np.zeros((6*8, 3), np.float32)
    objp[:, :2] = np.mgrid[0:8, 0:6].T.reshape(-1, 2)

    # Arrays to store object points and image points from all the images.
    objpoints = []  # 3d points in real world space
    imgpoints = []  # 2d points in image plane.

    # Make a list of calibration images
    images = glob.glob('calibration_wide/GO*.jpg')

    # Step through the list and search for chessboard corners
    for idx, fname in enumerate(images):
        img = cv2.imread(fname)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        # Find the chessboard corners
        ret, corners = cv2.findChessboardCorners(gray, (8, 6), None)

        # If found, add object points, image points
        if ret == True:
            objpoints.append(objp)
            imgpoints.append(corners)

            # Draw and display the corners
            cv2.drawChessboardCorners(img, (8, 6), corners, ret)
            #write_name = 'corners_found'+str(idx)+'.jpg'
            #cv2.imwrite(write_name, img)
            cv2.imshow('img', img)
            cv2.waitKey(500)

    cv2.destroyAllWindows()
    return objpoints, imgpoints

def find_points(images):
    pattern_size = (9, 6)
    obj_points = []
    img_points = []

    # Assumed object points relation
    a_object_point = np.zeros((PATTERN_SIZE[1] * PATTERN_SIZE[0], 3), np.float32)
    a_object_point[:, :2] = np.mgrid[0:PATTERN_SIZE[0], 0:PATTERN_SIZE[1]].T.reshape(-1, 2)

    # Termination criteria for sub pixel corners refinement
    stop_criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 0.001)

    print('Finding points ', end='')
    debug_images = []
    for (image, color_image) in images:
        found, corners = cv.findChessboardCorners(image, PATTERN_SIZE, None)
        if found:
            obj_points.append(a_object_point)
            cv.cornerSubPix(image, corners, (11, 11), (-1, -1), stop_criteria)
            img_points.append(corners)
            print('.', end='')
        else:
            print('-', end='')

        if DEBUG:
            cv.drawChessboardCorners(color_image, PATTERN_SIZE, corners, found)
            debug_images.append(color_image)

        sys.stdout.flush()

    if DEBUG:
        display_images(debug_images, DISPLAY_SCALE)
    print('\nWas able to find points in %s images' % len(img_points))
    return obj_points, img_points

# images is a list of tuples: (gray_image, color_image)

def rgb_callback(self, data):
    try:
        self.rgb_img = self.br.imgmsg_to_cv2(data, "bgr8")
    except CvBridgeError as e:
        print(e)

    gray = cv2.cvtColor(self.rgb_img, cv2.COLOR_BGR2GRAY)
    rgb_ret, rgb_corners = cv2.findChessboardCorners(gray, (x_num, y_num), None)
    cv2.namedWindow('rgb_img', cv2.WINDOW_NORMAL)
    cv2.imshow('rgb_img', self.rgb_img)
    cv2.waitKey(5)
    if rgb_ret == True:
        rgb_tempimg = self.rgb_img.copy()
        cv2.cornerSubPix(gray, rgb_corners, (5, 5), (-1, -1), criteria)
        cv2.drawChessboardCorners(rgb_tempimg, (x_num, y_num), rgb_corners, rgb_ret)
        rgb_rvec, self.rgb_tvec, rgb_inliers = cv2.solvePnPRansac(objpoints, rgb_corners, rgb_mtx, rgb_dist)
        self.rgb_rmat, _ = cv2.Rodrigues(rgb_rvec)

        print("The world coordinate system's origin in camera's coordinate system:")
        print("===rgb_camera rvec:")
        print(rgb_rvec)
        print("===rgb_camera rmat:")
        print(self.rgb_rmat)
        print("===rgb_camera tvec:")
        print(self.rgb_tvec)
        print("rgb_camera_shape: ")
        print(self.rgb_img.shape)

        print("The camera origin in world coordinate system:")
        print("===camera rmat:")
        print(self.rgb_rmat.T)
        print("===camera tvec:")
        print(-np.dot(self.rgb_rmat.T, self.rgb_tvec))

        rgb_stream = open("/home/chentao/kinect_calibration/rgb_camera_pose.yaml", "w")
        data = {'rmat': self.rgb_rmat.tolist(), 'tvec': self.rgb_tvec.tolist()}
        yaml.dump(data, rgb_stream)

        cv2.imshow('rgb_img', rgb_tempimg)
        cv2.waitKey(5)

def corners_unwarp(img, nx, ny, mtx, dist):
    # Use the OpenCV undistort() function to remove distortion
    undist = cv2.undistort(img, mtx, dist, None, mtx)
    # Convert undistorted image to grayscale
    gray = cv2.cvtColor(undist, cv2.COLOR_BGR2GRAY)
    # Search for corners in the grayscaled image
    ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)

    if ret == True:
        # If we found corners, draw them! (just for fun)
        cv2.drawChessboardCorners(undist, (nx, ny), corners, ret)
        # Choose offset from image corners to plot detected corners
        # This should be chosen to present the result at the proper aspect ratio
        # My choice of 100 pixels is not exact, but close enough for our purpose here
        offset = 100  # offset for dst points
        # Grab the image shape
        img_size = (gray.shape[1], gray.shape[0])

        # For source points I'm grabbing the outer four detected corners
        src = np.float32([corners[0], corners[nx-1], corners[-1], corners[-nx]])
        # For destination points, I'm arbitrarily choosing some points to be
        # a nice fit for displaying our warped result
        # again, not exact, but close enough for our purposes
        dst = np.float32([[offset, offset], [img_size[0]-offset, offset],
                          [img_size[0]-offset, img_size[1]-offset],
                          [offset, img_size[1]-offset]])
        # Given src and dst points, calculate the perspective transform matrix
        M = cv2.getPerspectiveTransform(src, dst)
        # Warp the image using OpenCV warpPerspective()
        warped = cv2.warpPerspective(undist, M, img_size)
        # Return the resulting image and matrix
        return warped, M

def ir_calib_callback(self, data):
    try:
        self.ir_img = self.mkgray(data)
    except CvBridgeError as e:
        print(e)

    ir_ret, ir_corners = cv2.findChessboardCorners(self.ir_img, (y_num, x_num))
    cv2.imshow('ir_img', self.ir_img)
    cv2.waitKey(5)
    if ir_ret == True:
        ir_tempimg = self.ir_img.copy()
        cv2.cornerSubPix(ir_tempimg, ir_corners, (11, 11), (-1, -1), criteria)
        cv2.drawChessboardCorners(ir_tempimg, (y_num, x_num), ir_corners, ir_ret)
        # ret, rvec, tvec = cv2.solvePnP(objpoints, corners, mtx, dist, flags = cv2.CV_EPNP)

        depth_stream = open("/home/chentao/kinect_calibration/ir_camera_corners.yaml", "w")
        data = {'corners': ir_corners.tolist()}
        yaml.dump(data, depth_stream)

        cv2.imshow('ir_img', ir_tempimg)
        cv2.waitKey(5)

def corners_unwarp(img, nx, ny, undistorted):
    M = None
    warped = np.copy(img)
    # Use the OpenCV undistort() function to remove distortion
    undist = undistorted
    # Convert undistorted image to grayscale
    gray = cv2.cvtColor(undist, cv2.COLOR_BGR2GRAY)
    # Search for corners in the grayscaled image
    ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)

    if ret == True:
        # If we found corners, draw them! (just for fun)
        cv2.drawChessboardCorners(undist, (nx, ny), corners, ret)
        # Choose offset from image corners to plot detected corners
        # This should be chosen to present the result at the proper aspect ratio
        # My choice of 100 pixels is not exact, but close enough for our purpose here
        offset = 100  # offset for dst points
        # Grab the image shape
        img_size = (gray.shape[1], gray.shape[0])

        # For source points I'm grabbing the outer four detected corners
        src = np.float32([corners[0], corners[nx-1], corners[-1], corners[-nx]])
        # For destination points, I'm arbitrarily choosing some points to be
        # a nice fit for displaying our warped result
        # again, not exact, but close enough for our purposes
        dst = np.float32([[offset, offset], [img_size[0]-offset, offset],
                          [img_size[0]-offset, img_size[1]-offset],
                          [offset, img_size[1]-offset]])
        # Given src and dst points, calculate the perspective transform matrix
        M = cv2.getPerspectiveTransform(src, dst)
        # Warp the image using OpenCV warpPerspective()
        warped = cv2.warpPerspective(undist, M, img_size)

    # Return the resulting image and matrix
    return warped, M

def ir_callback(self, data):
    try:
        self.ir_img = self.mkgray(data)
    except CvBridgeError as e:
        print(e)

    ir_ret, ir_corners = cv2.findChessboardCorners(self.ir_img, (x_num, y_num))
    cv2.namedWindow('ir_img', cv2.WINDOW_NORMAL)
    cv2.imshow('ir_img', self.ir_img)
    cv2.waitKey(5)
    if ir_ret == True:
        ir_tempimg = self.ir_img.copy()
        cv2.cornerSubPix(ir_tempimg, ir_corners, (11, 11), (-1, -1), criteria)
        cv2.drawChessboardCorners(ir_tempimg, (x_num, y_num), ir_corners, ir_ret)
        # ret, rvec, tvec = cv2.solvePnP(objpoints, corners, mtx, dist, flags = cv2.CV_EPNP)
        ir_rvec, self.ir_tvec, ir_inliers = cv2.solvePnPRansac(objpoints, ir_corners, depth_mtx, depth_dist)
        self.ir_rmat, _ = cv2.Rodrigues(ir_rvec)

        print("The world coordinate system's origin in camera's coordinate system:")
        print("===ir_camera rvec:")
        print(ir_rvec)
        print("===ir_camera rmat:")
        print(self.ir_rmat)
        print("===ir_camera tvec:")
        print(self.ir_tvec)
        print("ir_camera_shape: ")
        print(self.ir_img.shape)

        print("The camera origin in world coordinate system:")
        print("===camera rmat:")
        print(self.ir_rmat.T)
        print("===camera tvec:")
        print(-np.dot(self.ir_rmat.T, self.ir_tvec))

        depth_stream = open("/home/chentao/kinect_calibration/ir_camera_pose.yaml", "w")
        data = {'rmat': self.ir_rmat.tolist(), 'tvec': self.ir_tvec.tolist()}
        yaml.dump(data, depth_stream)

        cv2.imshow('ir_img', ir_tempimg)
        cv2.waitKey(5)

def _get_corners(img, board, refine=True, checkerboard_flags=0):
    """
    Get corners for a particular chessboard for an image
    """
    h = img.shape[0]
    w = img.shape[1]
    if len(img.shape) == 3 and img.shape[2] == 3:
        mono = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    else:
        mono = img
    (ok, corners) = cv2.findChessboardCorners(mono, (board.n_cols, board.n_rows),
                                              flags=cv2.CALIB_CB_ADAPTIVE_THRESH |
                                              cv2.CALIB_CB_NORMALIZE_IMAGE | checkerboard_flags)
    if not ok:
        return (ok, corners)

    # If any corners are within BORDER pixels of the screen edge, reject the detection by setting ok to false
    # NOTE: This may cause problems with very low-resolution cameras, where 8 pixels is a non-negligible fraction
    # of the image size. See http://answers.ros.org/question/3155/how-can-i-calibrate-low-resolution-cameras
    BORDER = 8
    if not all([(BORDER < corners[i, 0, 0] < (w - BORDER)) and (BORDER < corners[i, 0, 1] < (h - BORDER))
                for i in range(corners.shape[0])]):
        ok = False

    if refine and ok:
        # Use a radius of half the minimum distance between corners. This should be large enough to snap to the
        # correct corner, but not so large as to include a wrong corner in the search window.
        min_distance = float("inf")
        for row in range(board.n_rows):
            for col in range(board.n_cols - 1):
                index = row*board.n_rows + col
                min_distance = min(min_distance, _pdist(corners[index, 0], corners[index + 1, 0]))
        for row in range(board.n_rows - 1):
            for col in range(board.n_cols):
                index = row*board.n_rows + col
                min_distance = min(min_distance, _pdist(corners[index, 0], corners[index + board.n_cols, 0]))
        radius = int(math.ceil(min_distance * 0.5))
        cv2.cornerSubPix(mono, corners, (radius, radius), (-1, -1),
                         (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1))

    return (ok, corners)

def try_approximate_corners_blur(self, board_dims, sharpness_threshold):
    sharpness = cv2.Laplacian(self.frame, cv2.CV_64F).var()
    if sharpness < sharpness_threshold:
        return False
    found, corners = cv2.findChessboardCorners(self.frame, board_dims)
    self.current_image_points = corners
    return found

def _findChessboard(self):
    # Find the chess board corners
    flags = cv2.CALIB_CB_FAST_CHECK
    if self._detect_sensible:
        flags = (cv2.CALIB_CB_FAST_CHECK |
                 cv2.CALIB_CB_ADAPTIVE_THRESH |
                 cv2.CALIB_CB_FILTER_QUADS |
                 cv2.CALIB_CB_NORMALIZE_IMAGE)

    (didFindCorners, corners) = cv2.findChessboardCorners(
        self.img, self.opts['size'], flags=flags
    )
    if didFindCorners:
        # further refine corners; corners is updated in place
        cv2.cornerSubPix(
            self.img, corners, (11, 11), (-1, -1),
            # termination criteria for corner estimation for
            # chessboard method
            (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
        )  # returns None
    return didFindCorners, corners

def rgb_calib_callback(self, data):
    try:
        self.rgb_img = self.br.imgmsg_to_cv2(data, "bgr8")
    except CvBridgeError as e:
        print(e)

    gray = cv2.cvtColor(self.rgb_img, cv2.COLOR_BGR2GRAY)
    rgb_ret, rgb_corners = cv2.findChessboardCorners(gray, (y_num, x_num), None)
    cv2.imshow('rgb_img', self.rgb_img)
    cv2.waitKey(5)
    if rgb_ret == True:
        rgb_tempimg = self.rgb_img.copy()
        cv2.cornerSubPix(gray, rgb_corners, (11, 11), (-1, -1), criteria)
        cv2.drawChessboardCorners(rgb_tempimg, (y_num, x_num), rgb_corners, rgb_ret)

        rgb_stream = open("/home/chentao/kinect_calibration/rgb_camera_corners.yaml", "w")
        data = {'corners': rgb_corners.tolist()}
        yaml.dump(data, rgb_stream)

        cv2.imshow('rgb_img', rgb_tempimg)
        cv2.waitKey(5)

def rgb_callback(self, data):
    try:
        img = self.br.imgmsg_to_cv2(data, "bgr8")
    except CvBridgeError as e:
        print(e)

    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # ret, corners = cv2.findChessboardCorners(gray, (x_num,y_num),None)
    ret, corners = cv2.findChessboardCorners(img, (x_num, y_num))
    cv2.imshow('img', img)
    cv2.waitKey(5)
    if ret == True:
        cv2.cornerSubPix(gray, corners, (5, 5), (-1, -1), criteria)
        tempimg = img.copy()
        cv2.drawChessboardCorners(tempimg, (x_num, y_num), corners, ret)
        # ret, rvec, tvec = cv2.solvePnP(objpoints, corners, mtx, dist, flags = cv2.CV_EPNP)
        rvec, tvec, inliers = cv2.solvePnPRansac(objpoints, corners, rgb_mtx, rgb_dist)
        print("rvecs:")
        print(rvec)
        print("tvecs:")
        print(tvec)

        # project 3D points to image plane
        imgpts, jac = cv2.projectPoints(axis, rvec, tvec, rgb_mtx, rgb_dist)
        imgpts = np.int32(imgpts).reshape(-1, 2)

        cv2.line(tempimg, tuple(imgpts[0]), tuple(imgpts[1]), [255, 0, 0], 4)  # BGR
        cv2.line(tempimg, tuple(imgpts[0]), tuple(imgpts[2]), [0, 255, 0], 4)
        cv2.line(tempimg, tuple(imgpts[0]), tuple(imgpts[3]), [0, 0, 255], 4)

        cv2.imshow('img', tempimg)
        cv2.waitKey(5)

def extract_checkerboard_and_draw_corners(self, image, chbrd_size):
    image = CvBridge().imgmsg_to_cv2(image, 'mono8')
    image_color = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)

    ret, corners = cv2.findChessboardCorners(image_color, chbrd_size)

    if not ret:
        cv2.putText(image_color, 'Checkerboard not found', (0, self.res_height - 5),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255))

    cv2.drawChessboardCorners(image_color, chbrd_size, corners, ret)

    return ret, corners, image_color

def chessboard(image, pattern_size=(9, 5)):
    status, corners = cv2.findChessboardCorners(image, pattern_size, flags=4)
    if status:
        mean = corners.sum(0) / corners.shape[0]
        # mean is [[x,y]]
        return mean[0], corners
    else:
        return None

def load_frame_images(self):
    """
    Load images (or image pairs) from self.full_frame_folder_path
    """
    print("Loading frames from '{0:s}'".format(self.full_frame_folder_path))
    all_files = [f for f in os.listdir(self.full_frame_folder_path)
                 if osp.isfile(osp.join(self.full_frame_folder_path, f)) and f.endswith(".png")]
    all_files.sort()

    usable_frame_ct = sys.maxsize

    frame_number_sets = []

    for video in self.videos:
        # assume matching numbers in corresponding left & right files
        files = [f for f in all_files if f.startswith(video.name)]
        files.sort()  # added to be explicit

        cam_frame_ct = 0
        frame_numbers = []
        for ix_pair in range(len(files)):
            frame = cv2.imread(osp.join(self.full_frame_folder_path, files[ix_pair]))
            frame_number = int(re.search(r'\d\d\d\d', files[ix_pair]).group(0))
            frame_numbers.append(frame_number)
            found, corners = cv2.findChessboardCorners(frame, self.board_dims)
            if not found:
                raise ValueError("Could not find corners in image '{0:s}'".format(files[ix_pair]))
            grey = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            cv2.cornerSubPix(grey, corners, (11, 11), (-1, -1), self.criteria_subpix)
            video.image_points.append(corners)
            video.usable_frames[frame_number] = ix_pair
            cam_frame_ct += 1
        usable_frame_ct = min(usable_frame_ct, cam_frame_ct)
        frame_number_sets.append(frame_numbers)

    if len(self.videos) > 1:
        # check that all cameras have the same frame number sets
        if len(frame_number_sets[0]) != len(frame_number_sets[1]):
            raise ValueError(
                "There are some non-paired frames in folder '{0:s}'".format(self.full_frame_folder_path))
        for i_fn in range(len(frame_number_sets[0])):
            fn0 = frame_number_sets[0][i_fn]
            fn1 = frame_number_sets[1][i_fn]
            if fn0 != fn1:
                raise ValueError("There are some non-paired frames in folder '{0:s}'." +
                                 " Check frame {1:d} for camera {2:s} and frame {3:d} for camera {4:s}."
                                 .format(self.full_frame_folder_path,
                                         fn0, self.videos[0].name,
                                         fn1, self.videos[1].name))

    for i_frame in range(usable_frame_ct):
        self.object_points.append(self.board_object_corner_set)

    return usable_frame_ct

def detect(self, img):
    if len(img.shape) > 2:
        raise Exception("ChessboardDetector uses gray image as input")

    detection = None
    ret, corners = cv2.findChessboardCorners(img, self.chess_shape, None)
    if ret:
        ret, rvec, tvec = cv2.solvePnP(self.obj_points, corners,
                                       self.camera.matrix(), np.array([0, 0, 0, 0, 0]))

        # invert axis convention
        rvec[1] = -rvec[1]
        rvec[2] = -rvec[2]
        tvec[1] = -tvec[1]
        tvec[2] = -tvec[2]

        detection = Transform()
        detection.matrix[0:3, 0:3] = cv2.Rodrigues(rvec)[0]
        detection.set_translation(tvec[0] / 1000, tvec[1] / 1000, tvec[2] / 1000)
    return detection

def detect_corners(self):
    self.parent.app.setOverrideCursor(qt.QCursor(qt.Qt.WaitCursor))
    self.chessboard_status = []
    self.chessboard_points_2D = [np.zeros([(self.chessboard_squares_x.value() - 1)*(self.chessboard_squares_y.value() - 1), 2]) for i in range(len(self.images))]
    self.n_chessboard_points = (self.chessboard_squares_x.value() - 1, self.chessboard_squares_y.value() - 1)

    for imnum in range(len(self.images)):
        self.status_text.setText('<b>Detecting chessboard pattern in image {:d} / {:d}...</b>'.format(imnum, len(self.images)))
        self.parent.app.processEvents()
        status, points = cv2.findChessboardCorners(self.images[imnum], self.n_chessboard_points, flags=cv2.CALIB_CB_ADAPTIVE_THRESH)
        self.chessboard_status.append(not status)
        if status:
            for j, point in enumerate(points):
                self.chessboard_points_2D[imnum][j, :] = point[0]

    self.status_text.setText('')
    self.parent.app.restoreOverrideCursor()

    if np.all(self.chessboard_status):
        dialog = qt.QMessageBox(self)
        dialog.setStandardButtons(qt.QMessageBox.Ok)
        dialog.setTextFormat(qt.Qt.RichText)
        dialog.setWindowTitle('Calcam - No Chessboards Detected')
        dialog.setText("No {:d} x {:d} square chessboard patterns were found in the images.".format(self.chessboard_squares_x.value(), self.chessboard_squares_y.value()))
        dialog.setInformativeText("Is the number of squares set correctly?")
        dialog.setIcon(qt.QMessageBox.Warning)
        dialog.exec_()
    elif np.any(self.chessboard_status):
        dialog = qt.QMessageBox(self)
        dialog.setStandardButtons(qt.QMessageBox.Ok)
        dialog.setTextFormat(qt.Qt.RichText)
        dialog.setWindowTitle('Calcam - Chessboard Detection')
        dialog.setText("A {:d} x {:d} square chessboard pattern could not be detected in the following {:d} of {:d} images, which will therefore not be included as additional chessboard constraints:".format(self.chessboard_squares_x.value(), self.chessboard_squares_y.value(), np.count_nonzero(self.chessboard_status), len(self.images)))
        dialog.setInformativeText('<br>'.join(['[#{:d}] '.format(i+1) + self.filenames[i] for i in range(len(self.filenames)) if self.chessboard_status[i]]))
        dialog.setIcon(qt.QMessageBox.Warning)
        dialog.exec_()

    self.chessboard_status = [not status for status in self.chessboard_status]
    self.detection_run = True
    self.update_image_display()

    if np.any(self.chessboard_status):
        self.apply_button.setEnabled(True)
        self.status_text.setText('<b>Chessboard patterns detected successfully in {:d} images. Click Apply to use these in Calcam.</b>'.format(np.count_nonzero(self.chessboard_status), len(self.images)))
    else:
        self.apply_button.setEnabled(False)
        self.status_text.setText('')

def main():
    # prepare object points
    nx = 8  # TODO: enter the number of inside corners in x
    ny = 6  # TODO: enter the number of inside corners in y

    # Make a list of calibration images
    fname = './calibration_wide/GOPR0058.jpg'
    img = cv2.imread(fname)
    plt.imshow(img)

    # Convert to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # Find the chessboard corners
    ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)

    # If found, draw corners
    if ret == True:
        # Draw and display the corners
        cv2.drawChessboardCorners(img, (nx, ny), corners, ret)
        plt.imshow(img)
        plt.show()

def draw_chessboard_corners(image):
    # Find the chess board corners
    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    ret, corners = cv2.findChessboardCorners(gray_image, (9, 6), None)

    # Draw image
    if ret is True:
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
        corners2 = cv2.cornerSubPix(gray_image, corners, (11, 11), (-1, -1), criteria)
        img = cv2.drawChessboardCorners(image, (9, 6), corners2, ret)
        return img

def find_chessboard(self, debug=False):
    """
    Finds the chessboard in the image and recovers each corner

    Parameters
    ----------
    debug: Boolean
        If True, shows image with chessboard corners overlayed (Default False)

    Returns
    -------
    numpy array: (2,54) shape array that is each corner in the chessboard
    """
    ic = np.array([self.Row, self.Column])
    img = self.bc.read_raw()
    ret, ic = cv2.findChessboardCorners(img, (6, 9))
    ic_np = np.zeros([2, 54])
    for i in range(self.Column*self.Row):
        ic_np[:, i] = ic[i][0, :]

    if debug:
        for i in range(len(ic)):
            p = ic[i]
            img[int(p[0][1]), int(p[0][0]), 2] = 255
        cv2.imshow('debug', img)
        cv2.waitKey(30)
        IPython.embed()

    return ic_np

def camera_cal(self, image):
    # termination criteria
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
    nx = 8
    ny = 6
    dst = np.copy(image)

    # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
    objp = np.zeros((ny * nx, 3), np.float32)
    objp[:, :2] = np.mgrid[0:nx, 0:ny].T.reshape(-1, 2)

    # Arrays to store object points and image points from all the images.
    objpoints = []  # 3d points in real world space
    imgpoints = []  # 2d points in image plane.

    # Search for chessboard corners
    grey = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    #ret_thresh, mask = cv2.threshold(grey, 30, 255, cv2.THRESH_BINARY)

    ret, corners = cv2.findChessboardCorners(image, (nx, ny), None)
    #flags=(cv2.cv.CV_CALIB_CB_ADAPTIVE_THRESH + cv2.cv.CV_CALIB_CB_FILTER_QUADS))

    # If found, add object points, image points
    if ret == True:
        objpoints.append(objp)
        cv2.cornerSubPix(grey, corners, (11, 11), (-1, -1), criteria)
        imgpoints.append(corners)
        self.calibrated = True
        print("FOUND!")

        # Draw and display the corners
        cv2.drawChessboardCorners(image, (nx, ny), corners, ret)

        # Do camera calibration given object points and image points
        ret, self.mtx, self.dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, grey.shape[::-1], None, None)

        # Save the camera calibration result for later use (we won't worry about rvecs / tvecs)
        dist_pickle = {}
        dist_pickle["mtx"] = self.mtx
        dist_pickle["dist"] = self.dist
        dist_pickle['objpoints'] = objpoints
        dist_pickle['imgpoints'] = imgpoints
        pickle.dump(dist_pickle, open("/home/wil/ros/catkin_ws/src/av_sim/computer_vision/camera_calibration/data/camera_cal_pickle.p", "wb"))
    #else:
        #print("Searching...")

    return image

def find_chessboard(self, sx=6, sy=9):
    """Finds the corners of an sx X sy chessboard in the image.

    Parameters
    ----------
    sx : int
        Number of chessboard corners in x-direction.
    sy : int
        Number of chessboard corners in y-direction.

    Returns
    -------
    :obj:`list` of :obj:`numpy.ndarray`
        A list containing the 2D points of the corners of the detected
        chessboard, or None if no chessboard found.
    """
    # termination criteria
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

    # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
    objp = np.zeros((sx * sy, 3), np.float32)
    objp[:, :2] = np.mgrid[0:sx, 0:sy].T.reshape(-1, 2)

    # Arrays to store object points and image points from all the images.
    objpoints = []  # 3d point in real world space
    imgpoints = []  # 2d points in image plane.

    # create images
    img = self.data.astype(np.uint8)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    # Find the chess board corners
    ret, corners = cv2.findChessboardCorners(gray, (sx, sy), None)

    # If found, add object points, image points (after refining them)
    if ret:
        objpoints.append(objp)
        cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
        imgpoints.append(corners)

        if corners is not None:
            return corners.squeeze()
    return None

def calculate_camera_calibration(calib_path, rows, cols, cal_image_size):
    """Calculates the camera calibration based on chessboard images.

    Args:
        calib_path: calibration data (imgs) dir path
        rows: number of rows on chessboard
        cols: number of columns on chessboard

    Returns:
        a `dict` with calibration points
    """
    objp = np.zeros((rows * cols, 3), np.float32)
    objp[:, :2] = np.mgrid[0:cols, 0:rows].T.reshape(-1, 2)

    objpoints = []
    imgpoints = []

    images = glob(calib_path)
    cal_images = np.zeros((len(images), *cal_image_size), dtype=np.uint8)

    successfull_cnt = 0
    for idx, fname in enumerate(tqdm(images, desc='Processing image')):
        img = scipy.misc.imread(fname)
        if img.shape[0] != cal_image_size[0] or img.shape[1] != cal_image_size[1]:
            img = scipy.misc.imresize(img, cal_image_size)

        gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        ret, corners = cv2.findChessboardCorners(gray, (cols, rows), None)

        if ret:
            successfull_cnt += 1

            objpoints.append(objp)
            imgpoints.append(corners)

            img = cv2.drawChessboardCorners(img, (cols, rows), corners, ret)
            cal_images[idx] = img

    print("%s/%s camera calibration images processed." % (successfull_cnt, len(images)))

    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
        objpoints, imgpoints, cal_image_size[:-1], None, None)

    calibration = {'objpoints': objpoints,
                   'imgpoints': imgpoints,
                   'cal_images': cal_images,
                   'mtx': mtx,
                   'dist': dist,
                   'rvecs': rvecs,
                   'tvecs': tvecs}

    return calibration