The following code examples, extracted from open-source Python projects, illustrate how cv_bridge.CvBridgeError() is used in practice.
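Before the individual examples, here is a minimal, self-contained sketch of the pattern they all share: convert an incoming sensor_msgs/Image inside a try/except block and handle cv_bridge.CvBridgeError when the conversion fails. The node name, topic name, and class name below are illustrative assumptions, not taken from any of the projects that follow.

import rospy
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError


class ImageListener(object):
    def __init__(self):
        self.bridge = CvBridge()
        # Topic name is an assumption; substitute your camera's image topic.
        self.sub = rospy.Subscriber("/camera/image_raw", Image, self.callback)

    def callback(self, msg):
        try:
            # imgmsg_to_cv2 raises CvBridgeError if the conversion is not possible
            cv_image = self.bridge.imgmsg_to_cv2(msg, "bgr8")
        except CvBridgeError as e:
            rospy.logerr(e)
            return
        # ... process cv_image with OpenCV here ...


if __name__ == "__main__":
    rospy.init_node("cv_bridge_example")
    ImageListener()
    rospy.spin()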
def ir_calib_callback(self,data):
    try:
        self.ir_img = self.mkgray(data)
    except CvBridgeError as e:
        print(e)
    ir_ret, ir_corners = cv2.findChessboardCorners(self.ir_img, (y_num,x_num))
    cv2.imshow('ir_img',self.ir_img)
    cv2.waitKey(5)
    if ir_ret == True:
        ir_tempimg = self.ir_img.copy()
        cv2.cornerSubPix(ir_tempimg,ir_corners,(11,11),(-1,-1),criteria)
        cv2.drawChessboardCorners(ir_tempimg, (y_num,x_num), ir_corners,ir_ret)
        # ret, rvec, tvec = cv2.solvePnP(objpoints, corners, mtx, dist, flags = cv2.CV_EPNP)
        depth_stream = open("/home/chentao/kinect_calibration/ir_camera_corners.yaml", "w")
        data = {'corners':ir_corners.tolist()}
        yaml.dump(data, depth_stream)
        cv2.imshow('ir_img',ir_tempimg)
        cv2.waitKey(5)
def rgb_callback(self,data):
    try:
        self.rgb_img = self.br.imgmsg_to_cv2(data, "bgr8")
    except CvBridgeError as e:
        print(e)
    gray = cv2.cvtColor(self.rgb_img,cv2.COLOR_BGR2GRAY)
    rgb_ret, rgb_corners = cv2.findChessboardCorners(gray, (x_num,y_num),None)
    cv2.namedWindow('rgb_img', cv2.WINDOW_NORMAL)
    cv2.imshow('rgb_img',self.rgb_img)
    cv2.waitKey(5)
    if rgb_ret == True:
        rgb_tempimg = self.rgb_img.copy()
        cv2.cornerSubPix(gray,rgb_corners,(5,5),(-1,-1),criteria)
        cv2.drawChessboardCorners(rgb_tempimg, (x_num,y_num), rgb_corners,rgb_ret)
        rgb_rvec, self.rgb_tvec, rgb_inliers = cv2.solvePnPRansac(objpoints, rgb_corners, rgb_mtx, rgb_dist)
        self.rgb_rmat, _ = cv2.Rodrigues(rgb_rvec)
        print("The world coordinate system's origin in camera's coordinate system:")
        print("===rgb_camera rvec:")
        print(rgb_rvec)
        print("===rgb_camera rmat:")
        print(self.rgb_rmat)
        print("===rgb_camera tvec:")
        print(self.rgb_tvec)
        print("rgb_camera_shape: ")
        print(self.rgb_img.shape)
        print("The camera origin in world coordinate system:")
        print("===camera rmat:")
        print(self.rgb_rmat.T)
        print("===camera tvec:")
        print(-np.dot(self.rgb_rmat.T, self.rgb_tvec))
        rgb_stream = open("/home/chentao/kinect_calibration/rgb_camera_pose.yaml", "w")
        data = {'rmat':self.rgb_rmat.tolist(), 'tvec':self.rgb_tvec.tolist()}
        yaml.dump(data, rgb_stream)
        cv2.imshow('rgb_img',rgb_tempimg)
        cv2.waitKey(5)
def ir_callback(self,data):
    try:
        self.ir_img = self.mkgray(data)
    except CvBridgeError as e:
        print(e)
    ir_ret, ir_corners = cv2.findChessboardCorners(self.ir_img, (x_num,y_num))
    cv2.namedWindow('ir_img', cv2.WINDOW_NORMAL)
    cv2.imshow('ir_img',self.ir_img)
    cv2.waitKey(5)
    if ir_ret == True:
        ir_tempimg = self.ir_img.copy()
        cv2.cornerSubPix(ir_tempimg,ir_corners,(11,11),(-1,-1),criteria)
        cv2.drawChessboardCorners(ir_tempimg, (x_num,y_num), ir_corners,ir_ret)
        # ret, rvec, tvec = cv2.solvePnP(objpoints, corners, mtx, dist, flags = cv2.CV_EPNP)
        ir_rvec, self.ir_tvec, ir_inliers = cv2.solvePnPRansac(objpoints, ir_corners, depth_mtx, depth_dist)
        self.ir_rmat, _ = cv2.Rodrigues(ir_rvec)
        print("The world coordinate system's origin in camera's coordinate system:")
        print("===ir_camera rvec:")
        print(ir_rvec)
        print("===ir_camera rmat:")
        print(self.ir_rmat)
        print("===ir_camera tvec:")
        print(self.ir_tvec)
        print("ir_camera_shape: ")
        print(self.ir_img.shape)
        print("The camera origin in world coordinate system:")
        print("===camera rmat:")
        print(self.ir_rmat.T)
        print("===camera tvec:")
        print(-np.dot(self.ir_rmat.T, self.ir_tvec))
        depth_stream = open("/home/chentao/kinect_calibration/ir_camera_pose.yaml", "w")
        data = {'rmat':self.ir_rmat.tolist(), 'tvec':self.ir_tvec.tolist()}
        yaml.dump(data, depth_stream)
        cv2.imshow('ir_img',ir_tempimg)
        cv2.waitKey(5)
def rgb_calib_callback(self,data):
    try:
        self.rgb_img = self.br.imgmsg_to_cv2(data, "bgr8")
    except CvBridgeError as e:
        print(e)
    gray = cv2.cvtColor(self.rgb_img,cv2.COLOR_BGR2GRAY)
    rgb_ret, rgb_corners = cv2.findChessboardCorners(gray, (y_num,x_num),None)
    cv2.imshow('rgb_img',self.rgb_img)
    cv2.waitKey(5)
    if rgb_ret == True:
        rgb_tempimg = self.rgb_img.copy()
        cv2.cornerSubPix(gray,rgb_corners,(11,11),(-1,-1),criteria)
        cv2.drawChessboardCorners(rgb_tempimg, (y_num,x_num), rgb_corners,rgb_ret)
        rgb_stream = open("/home/chentao/kinect_calibration/rgb_camera_corners.yaml", "w")
        data = {'corners':rgb_corners.tolist()}
        yaml.dump(data, rgb_stream)
        cv2.imshow('rgb_img',rgb_tempimg)
        cv2.waitKey(5)
def rgb_callback(self,data):
    try:
        img = self.br.imgmsg_to_cv2(data, "bgr8")
    except CvBridgeError as e:
        print(e)
    gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    # ret, corners = cv2.findChessboardCorners(gray, (x_num,y_num),None)
    ret, corners = cv2.findChessboardCorners(img, (x_num,y_num))
    cv2.imshow('img',img)
    cv2.waitKey(5)
    if ret == True:
        cv2.cornerSubPix(gray,corners,(5,5),(-1,-1),criteria)
        tempimg = img.copy()
        cv2.drawChessboardCorners(tempimg, (x_num,y_num), corners,ret)
        # ret, rvec, tvec = cv2.solvePnP(objpoints, corners, mtx, dist, flags = cv2.CV_EPNP)
        rvec, tvec, inliers = cv2.solvePnPRansac(objpoints, corners, rgb_mtx, rgb_dist)
        print("rvecs:")
        print(rvec)
        print("tvecs:")
        print(tvec)
        # project 3D points to image plane
        imgpts, jac = cv2.projectPoints(axis, rvec, tvec, rgb_mtx, rgb_dist)
        imgpts = np.int32(imgpts).reshape(-1,2)
        cv2.line(tempimg, tuple(imgpts[0]), tuple(imgpts[1]),[255,0,0],4)  #BGR
        cv2.line(tempimg, tuple(imgpts[0]), tuple(imgpts[2]),[0,255,0],4)
        cv2.line(tempimg, tuple(imgpts[0]), tuple(imgpts[3]),[0,0,255],4)
        cv2.imshow('img',tempimg)
        cv2.waitKey(5)
def camera_callback(self, msg):
    try:
        self.camera_data = self.cv_bridge.imgmsg_to_cv2(msg, "bgr8")
    except cv_bridge.CvBridgeError:
        return
    gray = cv2.cvtColor(self.camera_data, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (5, 5), 0)
    canny = cv2.Canny(blur, 30, 150)
    cv2.imshow("Robot Camera", canny)
    cv2.waitKey(1)
def imageCallback(self, msg):
    global qImg
    try:
        cv_image = self.bridge.imgmsg_to_cv2(msg, "rgb8")
        cv_image = cv2.resize(cv_image, (640, 480))
        image = PImage.frombytes("RGB", (640, 480), cv_image.tostring())
        Lock.acquire()
        qImg = ImageQt(image)
        Lock.release()
    except CvBridgeError as e:
        print 'a'
        print(e)
def img_callback(self, image):
    try:
        inImg = self.bridge.imgmsg_to_cv2(image, 'bgr8')
    except CvBridgeError, e:
        print e
    inImgarr = np.array(inImg)
    try:
        self.outImg, self.face_names.data = self.process_image(inImgarr)
        # self.img_pub.publish(self.bridge.cv2_to_imgmsg(self.outImg, "bgr8"))
        # self.face_names.data = ['a', 'b']
        # print self.face_names
        self.name_pub.publish(self.face_names)
        self.all_names.data = self.names.values()
        # print self.all_names
        self.all_names_pub.publish(self.all_names)
        cv2.imshow("Face Recognition", self.outImg)
        cv2.waitKey(3)
        # print self.face_names
    except:
        print "Failed! Ensure data is collected & trained..."
def img_callback(self, image):
    try:
        inImg = self.bridge.imgmsg_to_cv2(image, 'bgr8')
    except CvBridgeError, e:
        print e
    inImgarr = np.array(inImg)
    self.outImg = self.process_image(inImgarr)
    # self.train_img_pub.publish(self.bridge.cv2_to_imgmsg(self.outImg, "bgr8"))
    # cv2.namedWindow("Capture Face")
    cv2.imshow('Capture Face', self.outImg)
    cv2.waitKey(3)
    if self.count == 100*self.cp_rate:
        rospy.loginfo("Data Captured!")
        rospy.loginfo("Training Data...")
        self.fisher_train_data()
        rospy.signal_shutdown('done')
def depth_callback(self, ros_image):
    try:
        inImg = self.bridge.imgmsg_to_cv2(ros_image)
    except CvBridgeError, e:
        print e
    inImgarr = np.array(inImg, dtype=np.uint16)
    # inImgarr = cv2.GaussianBlur(inImgarr, (3, 3), 0)
    # cv2.normalize(inImgarr, inImgarr, 0, 1, cv2.NORM_MINMAX)
    self.outImg, self.num_fingers = self.process_depth_image(inImgarr)
    # outImg = self.process_depth_image(inImgarr)
    # rate = rospy.Rate(10)
    self.num_pub.publish(self.num_fingers)
    # self.img_pub.publish(self.bridge.cv2_to_imgmsg(self.outImg, "bgr8"))
    # rate.sleep()
    cv2.imshow("Hand Gesture Recognition", self.outImg)
    cv2.waitKey(3)
def img_callback(self, image):
    try:
        inImg = self.bridge.imgmsg_to_cv2(image, 'bgr8')
    except CvBridgeError, e:
        print e
    inImgarr = np.array(inImg)
    try:
        self.outImg, self.face_names.data = self.process_image(inImgarr)
        # self.img_pub.publish(self.bridge.cv2_to_imgmsg(self.outImg, "bgr8"))
        # self.face_names.data = ['a', 'b']
        self.name_pub.publish(self.face_names)
        cv2.imshow("Face Recognition", self.outImg)
        cv2.waitKey(3)
        # print self.face_names
    except:
        print "Failed! Ensure data is collected & trained..."
def callbackCamera(msg):
    print('callbackCamera: msg : seq=%d, timestamp=%010d:%09d'%(
        msg.header.seq, msg.header.stamp.secs, msg.header.stamp.nsecs))
    try:
        cv_image = bridge.imgmsg_to_cv2(msg, "bgr8")
    except CvBridgeError as e:
        print(e)
    s = ('%010d%09d'%(msg.header.stamp.secs, msg.header.stamp.nsecs))
    filepath = "./camera/" + s + ".jpg"
    #print filepath
    height, width, channels = cv_image.shape
    cv_image = cv_image[400:height-200, 0:width]
    cv2.imwrite(filepath, cv_image)
    detector.detect(filepath)
    pass
def depth_callback(self,data):
    try:
        self.depth_image = self.br.imgmsg_to_cv2(data, desired_encoding="passthrough")
    except CvBridgeError as e:
        print(e)
    # print "depth"
    depth_min = np.nanmin(self.depth_image)
    depth_max = np.nanmax(self.depth_image)
    depth_img = self.depth_image.copy()
    depth_img[np.isnan(self.depth_image)] = depth_min
    depth_img = ((depth_img - depth_min) / (depth_max - depth_min) * 255).astype(np.uint8)
    cv2.imshow("Depth Image", depth_img)
    cv2.waitKey(5)
    # stream = open("/home/chentao/depth_test.yaml", "w")
    # data = {'img':depth_img.tolist()}
    # yaml.dump(data, stream)
def rgb_callback(self,data):
    try:
        self.rgb_image = self.br.imgmsg_to_cv2(data, "bgr8")
    except CvBridgeError as e:
        print(e)
    if self.drawing or self.rect_done:
        if (self.ix1 != -1 and self.iy1 != -1 and self.ix2 != -1 and self.iy2 != -1):
            cv2.rectangle(self.rgb_image,(self.ix1,self.iy1),(self.ix2,self.iy2),(0,255,0),2)
            if self.rect_done:
                center_point = self.get_center_point()
                cv2.circle(self.rgb_image, tuple(center_point.astype(int)), 3, (0,0,255),-1)
    cv2.imshow('RGB Image', self.rgb_image)
    cv2.waitKey(5)
    # print "rgb"
def rgb_callback(self,data):
    try:
        self.rgb_image = self.br.imgmsg_to_cv2(data, "bgr8")
    except CvBridgeError as e:
        print(e)
    tempimg = self.rgb_image.copy()
    if self.drawing or self.rect_done:
        if (self.ix1 != -1 and self.iy1 != -1 and self.ix2 != -1 and self.iy2 != -1):
            cv2.rectangle(tempimg,(self.ix1,self.iy1),(self.ix2,self.iy2),(0,255,0),2)
            if self.rect_done:
                center_point = self.get_center_point()
                cv2.circle(tempimg, tuple(center_point.astype(int)), 3, (0,0,255),-1)
    cv2.imshow('RGB Image', tempimg)
    cv2.waitKey(5)
    # print "rgb"
def depth_callback(self,data):
    try:
        self.depth_image = self.br.imgmsg_to_cv2(data, desired_encoding="passthrough")
    except CvBridgeError as e:
        print(e)
    # print "depth"
    depth_min = np.nanmin(self.depth_image)
    depth_max = np.nanmax(self.depth_image)
    depth_img = self.depth_image.copy()
    depth_img[np.isnan(self.depth_image)] = depth_min
    depth_img = ((depth_img - depth_min) / (depth_max - depth_min) * 255).astype(np.uint8)
    cv2.imshow("Depth Image", depth_img)
    cv2.waitKey(5)
def write_image(self, outdir, msg, fmt='png'):
    results = {}
    image_filename = os.path.join(outdir, str(msg.header.stamp.to_nsec()) + '.' + fmt)
    try:
        if hasattr(msg, 'format') and 'compressed' in msg.format:
            buf = np.ndarray(shape=(1, len(msg.data)), dtype=np.uint8, buffer=msg.data)
            cv_image = cv2.imdecode(buf, cv2.IMREAD_ANYCOLOR)
            if cv_image.shape[2] != 3:
                print("Invalid image %s" % image_filename)
                return results
            results['height'] = cv_image.shape[0]
            results['width'] = cv_image.shape[1]
            # Avoid re-encoding if we don't have to
            if check_image_format(msg.data) == fmt:
                buf.tofile(image_filename)
            else:
                cv2.imwrite(image_filename, cv_image)
        else:
            cv_image = self.bridge.imgmsg_to_cv2(msg, "bgr8")
            cv2.imwrite(image_filename, cv_image)
    except CvBridgeError as e:
        print(e)
    results['filename'] = image_filename
    return results
def depth_callback(self, msg):
    # create OpenCV depth image using default passthrough encoding
    try:
        depth_image = self.bridge.imgmsg_to_cv2(msg, desired_encoding='passthrough')
    except CvBridgeError as e:
        print(e)

    # using green ball (u, v) position, find depth value of Crazyflie point and divide by 1000
    # to change millimeters into meters (for Kinect sensors only)
    self.cf_d = depth_image[self.cf_v, self.cf_u] / 1000.0
    rospy.loginfo("Depth: x at %d y at %d z at %f", int(self.cf_u), int(self.cf_v), self.cf_d)

    # if depth value is zero, use the last non-zero depth value
    if self.cf_d == 0:
        self.cf_d = self.last_d
    else:
        self.last_d = self.cf_d

    # publish Crazyflie tf transform
    self.update_cf_transform(self.cf_u, self.cf_v, self.cf_d)

# This function builds the Crazyflie base_link tf transform and publishes it.
def make_mask(limb, filename):
    """
    Given a limb (right or left) and a name to save to
    (in the baxter_tools/share/images/ directory),
    create a mask of any dark objects in the image from the camera
    and save it.
    """
    image_sub = rospy.Subscriber(
        '/cameras/' + limb + '_hand_camera/image', Image, callback)
    try:
        bridge = CvBridge()
        cv_image = bridge.imgmsg_to_cv2(img, "bgr8")
    except CvBridgeError, e:
        print e
    msk = cv2.threshold(img, 250, 255, cv2.THRESH_BINARY_INV)
    return msk
def run(self):
    while True:
        # Only run loop if we have an image
        if self.process:
            self.imgprocess.ProcessingImage(self.latestimage)  # FormulaPi Image Processing Function
            self.AdjustMotorSpeed(self.imgprocess.trackoffset)  # Compute Motor Commands From Image Output

            # Publish Processed Image
            cvImage = self.imgprocess.outputImage
            try:
                imgmsg = self.bridge.cv2_to_imgmsg(cvImage, "bgr8")
                self.image_pub.publish(imgmsg)
            except CvBridgeError as e:
                print(e)

            #cv2.imshow("Image window", self.cvImage)
            #cv2.waitKey(3)
def callback(self, data):
    try:
        cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
    except CvBridgeError as e:
        rospy.logerr(e)

    if self._camera_needs_to_be_initialized(cv_image):
        self._initialize_camera_parameters(cv_image)
        self._calculate_transformation_matrix()

    if self.transformation_matrix is None:
        self._calculate_transformation_matrix()

    warped = self.img_prep.warp_perspective(cv_image, self.transformation_matrix,
                                            self.transformated_image_resolution)

    try:
        self.image_pub.publish(self.bridge.cv2_to_imgmsg(warped, "bgr8"))
    except CvBridgeError as e:
        rospy.logerr(e)
def callback(self, data):
    try:
        cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
        heigth, width, _ = cv_image.shape
    except CvBridgeError as e:
        rospy.logerr(e)

    if self.reset_tracking is True:
        self.init_lanemodel()
        self.reset_tracking = False

    self.lane_model.update_segments(cv_image.copy())
    self.lane_model.draw_segments(cv_image)
    state_point_x = self.lane_model.state_point_x()

    try:
        self.image_pub.publish(self.bridge.cv2_to_imgmsg(cv_image, "bgr8"))
        self.setpoint_pub.publish(0.0)
        if state_point_x:
            self.state_pub.publish(state_point_x - int(width/2))
    except CvBridgeError as e:
        rospy.logerr(e)
def compressed_imgmsg_to_cv2(cmprs_img_msg, desired_encoding = "passthrough"):
    """
    Convert a sensor_msgs::CompressedImage message to an OpenCV :cpp:type:`cv::Mat`.

    :param cmprs_img_msg:   A :cpp:type:`sensor_msgs::CompressedImage` message
    :param desired_encoding:  The encoding of the image data, one of the following strings:

       * ``"passthrough"``
       * one of the standard strings in sensor_msgs/image_encodings.h

    :rtype: :cpp:type:`cv::Mat`
    :raises CvBridgeError: when conversion is not possible.

    If desired_encoding is ``"passthrough"``, then the returned image has the same format
    as img_msg. Otherwise desired_encoding must be one of the standard image encodings.

    This function returns an OpenCV :cpp:type:`cv::Mat` message on success,
    or raises :exc:`cv_bridge.CvBridgeError` on failure.

    If the image only has one channel, the shape has size 2 (width and height)
    """
    str_msg = cmprs_img_msg.data
    buf = np.ndarray(shape=(1, len(str_msg)),
                     dtype=np.uint8, buffer=cmprs_img_msg.data)
    im = cv2.imdecode(buf, cv2.IMREAD_ANYCOLOR)

    if desired_encoding == "passthrough":
        return im

    try:
        res = cvtColor2(im, "bgr8", desired_encoding)
    except RuntimeError as e:
        raise CvBridgeError(e)

    return res
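A hedged usage sketch for the conversion helper above: subscribe to a sensor_msgs/CompressedImage topic and let CvBridgeError signal a failed color conversion. The node and topic names are illustrative assumptions.

import rospy
from sensor_msgs.msg import CompressedImage
from cv_bridge import CvBridgeError


def compressed_callback(msg):
    try:
        # decode the compressed payload and convert it to BGR
        frame = compressed_imgmsg_to_cv2(msg, desired_encoding="bgr8")
    except CvBridgeError as e:
        rospy.logerr(e)
        return
    rospy.loginfo("decoded frame with shape %s", str(frame.shape))


rospy.init_node("compressed_listener")
# Topic name is an assumption; compressed streams usually end in /compressed.
rospy.Subscriber("/camera/image_raw/compressed", CompressedImage, compressed_callback)
rospy.spin()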
def callback(self, msg):
    #convert image to opencv
    try:
        cv_image = self.bridge.imgmsg_to_cv2(msg)
        np_image = np.array(cv_image)
    except CvBridgeError, e:
        print "Could not convert ros message to opencv image: ", e
        return

    #calculate the fft magnitude
    img_float32 = np.float32(np_image)
    dft = cv2.dft(img_float32, flags = cv2.DFT_COMPLEX_OUTPUT)
    dft_shift = np.fft.fftshift(dft)
    magnitude_spectrum = cv2.magnitude(dft_shift[:,:,0],dft_shift[:,:,1])

    #normalize
    magnitude_spectrum_normalized = magnitude_spectrum / np.sum(magnitude_spectrum)

    #frequency domain entropy (-> Entropy Based Measure of Camera Focus. Matej Kristan, Franjo Pernu. University of Ljubljana. Slovenia)
    fde = np.sum( magnitude_spectrum_normalized * np.log(magnitude_spectrum_normalized) )

    y = 20; x = 20
    text = "fde: {0} (minimize this for focus)".format(np.sum(fde))
    np_image = cv2.cvtColor(np_image, cv2.COLOR_GRAY2BGR)
    cv2.putText(np_image, text, (x,y), cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.5, color=(0, 0, 255), thickness=2)
    cv2.imshow(self.windowNameOrig, np_image)
    cv2.waitKey(10)
def img_cb(self, msg):
    try:
        self.cv_image = self.bridge.imgmsg_to_cv2(msg, "bgr8")
    except CvBridgeError as e:
        print(e)
def _image_callback(self, msg):
    """
    Called when a new sensor_msgs/Image is coming in
    :param msg: The image message
    """
    try:
        cv_image = self.bridge.imgmsg_to_cv2(msg, "bgr8")
    except CvBridgeError as e:
        rospy.logerr(e)

    self._image_widget.set_image(cv_image)
def _image_callback(self, msg):
    """
    Sensor_msgs/Image callback
    :param msg: The image message
    """
    try:
        cv_image = self.bridge.imgmsg_to_cv2(msg, "bgr8")
    except CvBridgeError as e:
        rospy.logerr(e)
        return

    self._image_widget.set_image(cv_image)
def recognize_srv_callback(self, req):
    """
    Method callback for the Recognize.srv
    :param req: The service request
    """
    self._response.recognitions = []
    self._recognizing = True

    try:
        cv_image = self.bridge.imgmsg_to_cv2(req.image, "bgr8")
    except CvBridgeError as e:
        rospy.logerr(e)

    self._image_widget.set_image(cv_image)
    self._done_recognizing_button.setDisabled(False)

    timeout = 60.0  # Maximum of 60 seconds
    future = rospy.Time.now() + rospy.Duration(timeout)
    rospy.loginfo("Waiting for manual recognition, maximum of %d seconds", timeout)

    while not rospy.is_shutdown() and self._recognizing:
        if rospy.Time.now() > future:
            raise rospy.ServiceException("Timeout of %d seconds exceeded .." % timeout)
        rospy.sleep(rospy.Duration(0.1))

    self._done_recognizing_button.setDisabled(True)

    return self._response
def grabImage(self, sim=False, viz=False):
    # Grab image from Kinect sensor
    msg = rospy.wait_for_message('/semihaptics/image' if not sim else '/wide_stereo/left/image_color', Image)
    try:
        image = self.bridge.imgmsg_to_cv2(msg)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        if viz:
            PILImage.fromarray(np.uint8(image)).show()
        return image
    except CvBridgeError, e:
        print e
    return None
def callback_image(self, data):
    try:
        cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
    except CvBridgeError as e:
        rospy.logerr('[ros-video-recorder][VideoFrames] Converting Image Error. ' + str(e))
        return

    self.frames.append((time.time(), cv_image))
def start_record(self):
    self.start_time = time.time()
    curr_time = self.start_time
    while self.end_time < 0 or curr_time <= self.end_time:
        try:
            canvas = np.zeros((self.output_height, self.output_width, 3), np.uint8)

            for frame_w in self.frame_wrappers:
                f = frame_w.get_latest(at_time=curr_time)
                if f is None:
                    continue

                resized = cv2.resize(f, (frame_w.target_w, frame_w.target_h))
                canvas[frame_w.target_y:frame_w.target_y+frame_w.target_h,
                       frame_w.target_x:frame_w.target_x+frame_w.target_w] = resized
                # rospy.sleep(0.01)

            self.video_writer.write(canvas)
            if self.pub_img:
                try:
                    self.pub_img.publish(self.bridge.cv2_to_imgmsg(canvas, 'bgr8'))
                except CvBridgeError as e:
                    rospy.logerr('cvbridgeerror, e={}'.format(str(e)))
                    pass
            rospy.sleep(0.01)

            if rospy.is_shutdown() and self.end_time < 0:
                self.terminate()

            while curr_time + self.interval > time.time():
                rospy.sleep(self.interval)

            curr_time += self.interval
        except KeyboardInterrupt:
            self.terminate()
            continue

    self.video_writer.release()
def color_detection(self, data):
    """
    Callback to detect r, g, and b values in the image
    """
    try:
        cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
    except CvBridgeError as e:
        print e

    hsv = cv2.cvtColor(cv_image, cv2.COLOR_BGR2HSV)
    for request in self.detection_requests:
        request.detection(hsv)

#############################################
# Stereo image point cloud processing members
#############################################
def left_color_detection(self, data):
    """
    Callback to detect r, g, and b values in the image
    """
    try:
        self.left_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
    except CvBridgeError as excep:
        print excep
    for processor in self.processors:
        if processor.side&self.LEFT == self.LEFT:
            processor.process_image(self.left_image, data.header, 'left')
def right_color_detection(self, data):
    """
    Callback to detect r, g, and b values in the image
    """
    try:
        self.right_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
    except CvBridgeError as excep:
        print excep
    for processor in self.processors:
        if processor.side&self.RIGHT == self.RIGHT:
            processor.process_image(self.right_image, data.header, 'right')
def left_color_detection(self, data):
    """
    Callback to detect r, g, and b values in the image
    """
    try:
        cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
    except CvBridgeError as e:
        print e
    self.process_image(cv_image, data.header, 'left')
def right_color_detection(self, data):
    """
    Callback to detect r, g, and b values in the image
    """
    try:
        cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
    except CvBridgeError as e:
        print e
    self.process_image(cv_image, data.header, 'right')
def _learn_face_srv(self, req):
    try:
        bgr_image = self._bridge.imgmsg_to_cv2(req.image, "bgr8")
    except CvBridgeError as e:
        error_msg = "Could not convert to opencv image: %s" % e
        rospy.logerr(error_msg)
        return {"error_msg": error_msg}

    now = datetime.now()
    cv2.imwrite("%s/%s_learn_%s.jpeg" % (self._storage_folder,
                                         now.strftime("%Y-%m-%d-%H-%M-%S-%f"), req.name), bgr_image)

    try:
        rep = self._get_rep(bgr_image)
    except Exception as e:
        error_msg = "Could not get representation of face image: %s" % e
        rospy.logerr(error_msg)
        return {"error_msg": error_msg}

    if req.name not in self._face_dict:
        self._face_dict[req.name] = []

    self._face_dict[req.name].append(rep)

    rospy.loginfo("Succesfully learned face of '%s'" % req.name)

    # from http://www.diveintopython3.net/serializing.html
    if ( self._face_dict_filename != '' ):
        with open( self._face_dict_filename, 'wb' ) as f:
            pickle.dump( self._face_dict, f ); print "wrote _face_dict: %s" % self._face_dict_filename

    return {"error_msg": ""}
def _detect_face_srv(self, req):
    try:
        bgr_image = self._bridge.imgmsg_to_cv2(req.image, "bgr8")
    except CvBridgeError as e:
        error_msg = "Could not convert to opencv image: %s" % e
        rospy.logerr(error_msg)
        return {"error_msg": error_msg}

    # Display the resulting frame
    detections = [{"roi": _get_roi(bgr_image, d), "x": d.left(), "y": d.top(),
                   "width": d.width(), "height": d.height()}
                  for d in self._face_detector(bgr_image, 1)]  # 1 = upsample factor

    # Try to recognize
    detections = self._update_detections_with_recognitions(detections)

    rospy.logerr("This is for debugging")
    rospy.logerr(detections)

    # Try to add attributes
    if req.external_api_request:
        detections = self._update_detections_with_attributes(detections)

    # Save images
    if req.save_images:
        self._save_images(detections, bgr_image)

    return {
        "face_detections": [FaceDetection(names=(d["names"] if "name" in d else []),
                                          l2_distances=(d["l2_distances"] if "name" in d else []),
                                          x=d["x"],
                                          y=d["y"],
                                          width=d["width"],
                                          height=d["height"],
                                          gender_is_male=(d["attrs"]["gender"]["value"] == "male" if "attrs" in d else 0),
                                          gender_score=(float(d["attrs"]["gender"]["confidence"]) if "attrs" in d else 0),
                                          age_score=(1.0 if "attrs" in d else 0),
                                          age=(int(d["attrs"]["age_est"]["value"]) if "attrs" in d else 0))
                            for d in detections],
        "error_msg": ""
    }
def img_callback(self,data):
    try:
        self.cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
    except CvBridgeError as e:
        print(e)
def hand_img_callback(self, ros_image):
    try:
        self.hand_img = self.bridge.imgmsg_to_cv2(ros_image, "bgr8")
    except CvBridgeError, e:
        print e

    # cv2.imshow("Hand Image", self.hand_img)
    # cv2.waitKey(3)
def face_img_callback(self, ros_image):
    try:
        self.face_img = self.bridge.imgmsg_to_cv2(ros_image, "bgr8")
    except CvBridgeError, e:
        print e

    # cv2.imshow("Face Image", self.face_img)
    # cv2.waitKey(3)
def convert_depth_image(self, ros_image):
    # Use cv_bridge() to convert the ROS image to OpenCV format
    try:
        # Convert the depth image using the default passthrough encoding
        depth_image = self.cv_bridge.imgmsg_to_cv2(ros_image, "passthrough")
    except CvBridgeError, e:
        print e

    # Convert the depth image to a Numpy array
    self.depth_array = np.array(depth_image, dtype=np.float32)
def set_object_image(self, req, compress=False):
    """Handles SetObjectImage service requests.

    Args:
        req: SetObjectImageRequest/SetObjectCompressedImageRequest message.
        compress: Whether to return a compressed image or not.

    Returns:
        SetObjectImageResponse or SetObjectCompressedImageResponse.
    """
    if compress:
        response = interop.srv.SetObjectCompressedImageResponse()
    else:
        response = interop.srv.SetObjectImageResponse()

    try:
        png_image = serializers.ObjectImageSerializer.from_msg(req.image)
    except CvBridgeError as e:
        rospy.logerr(e)
        response.success = False
    except Exception as e:
        rospy.logfatal(e)
        response.success = False
    else:
        try:
            self.objects_dir.set_object_image(req.id, png_image)
        except (KeyError, IOError) as e:
            rospy.logerr("Could not set object image: {}".format(e))
            response.success = False
        else:
            response.success = True

    return response
def get_object_image(self, req, compress=False):
    """Handles GetObjectImage service requests.

    Args:
        req: GetObjectImageRequest/GetObjectCompressedImageRequest message.
        compress: Whether to return a compressed image or not.

    Returns:
        GetObjectImageResponse or GetObjectCompressedImageResponse.
    """
    if compress:
        response = interop.srv.GetObjectCompressedImageResponse()
    else:
        response = interop.srv.GetObjectImageResponse()

    try:
        png = self.objects_dir.get_object_image(req.id)
    except (KeyError, IOError) as e:
        rospy.logerr("Could not get object image: {}".format(e))
        response.success = False
    except Exception as e:
        rospy.logfatal(e)
        response.success = False
    else:
        try:
            response.image = serializers.ObjectImageSerializer.from_raw(
                png, compress)
        except CvBridgeError as e:
            rospy.logerr(e)
            response.success = False
        else:
            response.success = True

    return response
def laserCB(self, data):
    try:
        self.screen = np.squeeze(bridge.imgmsg_to_cv2(data, "mono8"))
        # print shape( self.screen)
    except CvBridgeError as e:
        print(e)
def laserCB(data):
    try:
        screen = np.squeeze(bridge.imgmsg_to_cv2(data, "mono8"))
    except CvBridgeError as e:
        print(e)

    cv2.imshow("Scree", screen)
    # rospy.loginfo(" dim : %s", screen.shape)
    cv2.waitKey(3)
def cvt_image(self,data):
    try:
        self.latestImage = self.bridge.imgmsg_to_cv2(data, "bgr8")
    except CvBridgeError as e:
        print(e)

    if self.imgRcvd != True:
        self.imgRcvd = True