The following code examples, extracted from open-source Python projects, illustrate how to use sensor_msgs.msg.Image().
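Before the examples, here is a minimal sketch of the pattern nearly all of them share: subscribe to a sensor_msgs/Image topic, convert the message to an OpenCV array with cv_bridge, process it, and optionally publish an Image back. The node name and topic names below are hypothetical placeholders, not taken from any of the projects that follow.

import rospy
import cv2
from cv_bridge import CvBridge, CvBridgeError
from sensor_msgs.msg import Image

bridge = CvBridge()
pub = rospy.Publisher('/image_out', Image, queue_size=1)  # hypothetical output topic

def image_callback(msg):
    # Convert the incoming sensor_msgs/Image to an OpenCV BGR array
    try:
        cv_image = bridge.imgmsg_to_cv2(msg, "bgr8")
    except CvBridgeError as e:
        rospy.logerr(e)
        return
    # ... process cv_image with OpenCV here ...
    # Convert back to a sensor_msgs/Image and republish
    pub.publish(bridge.cv2_to_imgmsg(cv_image, "bgr8"))

rospy.init_node('image_example')  # hypothetical node name
rospy.Subscriber('/camera/image_raw', Image, image_callback)  # hypothetical input topic
rospy.spin()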
def __init__(self):
    self.run_recognition = rospy.get_param('/rgb_object_detection/run_recognition')
    self.model_filename = rospy.get_param('/rgb_object_detection/model_file')
    self.weights_filename = rospy.get_param('/rgb_object_detection/weights_file')
    self.categories_filename = rospy.get_param('/rgb_object_detection/category_file')
    self.verbose = rospy.get_param('/rgb_object_detection/verbose', False)
    self.bridge = CvBridge()
    self.image_sub = rospy.Subscriber('/camera/rgb/image_color', Image, self.img_cb)
    self.patches_sub = rospy.Subscriber('/candidate_regions_depth', PolygonStamped, self.patches_cb)
    self.detection_pub = rospy.Publisher('/detections', Detection, queue_size=1)
    # you can read this value off of your sensor from the '/camera/depth_registered/camera_info' topic
    self.P = np.array([[525.0, 0.0, 319.5, 0.0],
                       [0.0, 525.0, 239.5, 0.0],
                       [0.0, 0.0, 1.0, 0.0]])
    if self.run_recognition:
        self.cnn = CNN('', self.model_filename, self.weights_filename, self.categories_filename, '', 0, 0, self.verbose)
        self.cnn.load_model()
def draw_rect(self, event, x, y, flags, param):
    if event == cv2.EVENT_LBUTTONDOWN:
        self.drawing = True
        self.rect_done = False
        self.ix1 = x
        self.iy1 = y
    elif event == cv2.EVENT_MOUSEMOVE:
        if self.drawing == True:
            self.ix2 = x
            self.iy2 = y
    elif event == cv2.EVENT_LBUTTONUP:
        self.drawing = False
        self.ix2 = x
        self.iy2 = y
        cv2.rectangle(self.rgb_image, (self.ix1, self.iy1), (self.ix2, self.iy2), (0, 255, 0), 2)
        center_point = self.get_center_point()
        cv2.circle(self.rgb_image, tuple(center_point.astype(int)), 3, (0, 0, 255), -1)
        cv2.imshow('RGB Image', self.rgb_image)
        cv2.waitKey(5)
        self.rect_done = True
def __init__(self, node_name, sub_topic, pub_topic):
    self.img_prep = ImagePreparator()
    self.bridge = CvBridge()

    self.camera = None
    self.horizon_y = None
    self.transformation_matrix = None
    self.image_resolution = DEFAULT_RESOLUTION
    self.transformated_image_resolution = DEFAULT_RESOLUTION

    self.image_pub = rospy.Publisher(pub_topic, Image, queue_size=QUEUE_SIZE)

    rospy.init_node(node_name, anonymous=True)

    self.image_sub = rospy.Subscriber(sub_topic, Image, self.callback)

    rospy.spin()
def main(args):
    rospy.init_node("publish_calib_file", args)

    files = glob.glob("left-*.png")
    files.sort()

    print("All {} files".format(len(files)))

    image_pub = rospy.Publisher("image", Image, queue_size=10)
    bridge = CvBridge()

    for f in files:
        if rospy.is_shutdown():
            break
        raw_input("Publish {}?".format(f))
        img = cv2.imread(f, 0)
        image_pub.publish(bridge.cv2_to_imgmsg(img, "mono8"))
def _setup_image(self, image_path):
    """
    Load the image located at the specified path

    @type image_path: str
    @param image_path: the relative or absolute file path to the image file

    @rtype: sensor_msgs/Image or None
    @return: sensor_msgs/Image if the image is convertible and None otherwise
    """
    if not os.access(image_path, os.R_OK):
        rospy.logerr("Cannot read file at '{0}'".format(image_path))
        return None

    img = cv2.imread(image_path)
    # Return msg
    return cv_bridge.CvBridge().cv2_to_imgmsg(img, encoding="bgr8")
def trigger_configuration(self):
    """
    Callback when the configuration button is clicked
    """
    topic_name, ok = QInputDialog.getItem(self._widget, "Select topic name", "Topic name",
                                          rostopic.find_by_type('sensor_msgs/Image'))
    if ok:
        self._create_subscriber(topic_name)

    available_rosservices = []
    for s in rosservice.get_service_list():
        try:
            if rosservice.get_service_type(s) in _SUPPORTED_SERVICES:
                available_rosservices.append(s)
        except:
            pass

    srv_name, ok = QInputDialog.getItem(self._widget, "Select service name", "Service name", available_rosservices)
    if ok:
        self._create_service_client(srv_name)
def start_detect(self):
    FunctionUnit.init_node(self)
    # print 'hello 1'
    # print self._receive_topic
    receive_1 = FunctionUnit.create_receive(self, self._receive_topic, Image, self.receive_1_cb)
    # print 'hello 2'
    FunctionUnit.spin(self)
def filter_image_msgs(topic, datatype, md5sum, msg_def, header):
    if datatype == "sensor_msgs/CompressedImage":
        if (opt_topic != "" and opt_topic == topic) or opt_topic == "":
            print "############# USING ######################"
            print topic, ' with datatype:', str(datatype)
            return True

    if datatype == "theora_image_transport/Packet":
        if (opt_topic != "" and opt_topic == topic) or opt_topic == "":
            print topic, ' with datatype:', str(datatype)
            # print "############# USING ######################"
            print '!!! theora not supported, sorry !!!'
            return False

    if datatype == "sensor_msgs/Image":
        if (opt_topic != "" and opt_topic == topic) or opt_topic == "":
            print "############# USING ######################"
            print topic, ' with datatype:', str(datatype)
            return True

    return False
def __init__(self, node_name='naoqi_camera'):
    NaoqiNode.__init__(self, node_name)

    self.camProxy = self.get_proxy("ALVideoDevice")
    if self.camProxy is None:
        exit(1)
    self.nameId = None
    self.camera_infos = {}
    def returnNone():
        return None
    self.config = defaultdict(returnNone)

    # ROS publishers
    self.pub_img_ = rospy.Publisher('~image_raw', Image, queue_size=5)
    self.pub_info_ = rospy.Publisher('~camera_info', CameraInfo, queue_size=5)

    # initialize the parameter server
    self.srv = Server(NaoqiCameraConfig, self.reconfigure)

    # initial load from param server
    self.init_config()

    # initially load configurations
    self.reconfigure(self.config, 0)
def __init__(self): """ Initialize the parking spot recognizer """ #Subscribe to topics of interest rospy.Subscriber("/camera/image_raw", Image, self.process_image) rospy.Subscriber('/cmd_vel', Twist, self.process_twist) # Initialize video images cv2.namedWindow('video_window') self.cv_image = None # the latest image from the camera self.dst = np.zeros(IMG_SHAPE, np.uint8) self.arc_image = np.zeros(IMG_SHAPE, np.uint8) self.transformed = np.zeros(IMG_SHAPE, np.uint8) # Declare instance variables self.bridge = CvBridge() # used to convert ROS messages to OpenCV self.hsv_lb = np.array([0, 70, 60]) # hsv lower bound to filter for parking lines self.hsv_ub = np.array([30, 255, 140]) # hsv upper bound self.vel = None self.omega = None self.color = (0,0,255)
def start_processing(self):
    """ Start processing data """
    if self.disabled:
        rospy.loginfo("Processing started")
        self.disabled = False
        if self.sub_left is None:
            self.sub_left = rospy.Subscriber("/multisense/camera/left/image_color",
                                             Image, self.left_color_detection)
            rospy.sleep(0.1)
        if self.sub_right is None:
            self.sub_right = rospy.Subscriber("/multisense/camera/right/image_color",
                                              Image, self.right_color_detection)
            rospy.sleep(0.1)
        if self.sub_cloud is None:
            self.sub_cloud = rospy.Subscriber("/multisense/image_points2_color",
                                              PointCloud2, self.stereo_cloud)
def get_image_compressed(self):
    rospy.loginfo("Getting image...")
    image_msg = rospy.wait_for_message("/wide_stereo/left/image_raw/compressed", CompressedImage)
    rospy.loginfo("Got image!")

    # Image to numpy array
    np_arr = np.fromstring(image_msg.data, np.uint8)
    # Decode to cv2 image and store
    cv2_img = cv2.imdecode(np_arr, cv2.CV_LOAD_IMAGE_COLOR)
    img_file_path = "/tmp/telegram_last_image.png"
    cv2.imwrite(img_file_path, cv2_img)
    rospy.loginfo("Saved to: " + img_file_path)
    return img_file_path

# Define a few command handlers
def do_image_stuff(self, update):
    # Get topics of type Image
    topics_and_types = rospy.get_published_topics()
    image_topics = []
    for top, typ in topics_and_types:
        if typ == 'sensor_msgs/Image':
            image_topics.append(top)

    keyboard = []
    for topicname in image_topics:
        keyboard.append([InlineKeyboardButton(topicname, callback_data=topicname)])
    reply_markup = InlineKeyboardMarkup(keyboard)

    update.message.reply_text('Choose image topic:', reply_markup=reply_markup)
def __init__(self): self.node_name = "face_recog_fisher" rospy.init_node(self.node_name) rospy.on_shutdown(self.cleanup) self.bridge = CvBridge() self.face_names = StringArray() self.all_names = StringArray() self.size = 4 face_haar = 'haarcascade_frontalface_default.xml' self.haar_cascade = cv2.CascadeClassifier(face_haar) self.face_dir = 'face_data_fisher' self.model = cv2.createFisherFaceRecognizer() # self.model = cv2.createEigenFaceRecognizer() (self.im_width, self.im_height) = (112, 92) rospy.loginfo("Loading data...") # self.fisher_train_data() self.load_trained_data() rospy.sleep(3) # self.img_sub = rospy.Subscriber("/asus/rgb/image_raw", Image, self.img_callback) self.img_sub = rospy.Subscriber("/usb_cam/image_raw", Image, self.img_callback) # self.img_pub = rospy.Publisher('face_img', Image, queue_size=10) self.name_pub = rospy.Publisher('face_names', StringArray, queue_size=10) self.all_names_pub = rospy.Publisher('all_face_names', StringArray, queue_size=10) rospy.loginfo("Detecting faces...")
def __init__(self): self.node_name = "train_faces_eigen" rospy.init_node(self.node_name) rospy.on_shutdown(self.cleanup) self.bridge = CvBridge() self.size = 4 face_haar = 'haarcascade_frontalface_default.xml' self.haar_cascade = cv2.CascadeClassifier(face_haar) self.face_dir = 'face_data_eigen' self.face_name = sys.argv[1] self.path = os.path.join(self.face_dir, self.face_name) # self.model = cv2.createFisherFaceRecognizer() self.model = cv2.createEigenFaceRecognizer() self.cp_rate = 5 if not os.path.isdir(self.path): os.mkdir(self.path) self.count = 0 self.train_img_sub = rospy.Subscriber("/usb_cam/image_raw", Image, self.img_callback) # self.train_img_pub = rospy.Publisher('train_face', Image, queue_size=10) rospy.loginfo("Capturing data...")
def __init__(self): self.node_name = "hand_gestures" rospy.init_node(self.node_name) rospy.on_shutdown(self.cleanup) # self.cv_window_name = self.node_name # cv2.namedWindow("Depth Image", 1) # cv2.moveWindow("Depth Image", 20, 350) self.bridge = CvBridge() self.numFingers = RecognizeNumFingers() self.depth_sub = rospy.Subscriber("/asus/depth/image_raw", Image, self.depth_callback) self.num_pub = rospy.Publisher('num_fingers', Int32, queue_size=10, latch=True) # self.img_pub = rospy.Publisher('hand_img', Image, queue_size=10) rospy.loginfo("Waiting for image topics...")
def find_person(self, name):
    # cv2.imshow("Face Image", self.face_img)
    # cv2.waitKey(3)
    count = 0
    found = False
    while count < 6 and not found:
        for i in range(len(self.face_names)):
            if self.face_names[i] == name:
                print self.face_names[i]
                return True
        count += 1
        self.rotate_tbot(180.0, 45.0 / 2)
        rospy.sleep(5)
        # print count
    return found
def __init__(self): self.node_name = "train_faces_fisher" rospy.init_node(self.node_name) rospy.on_shutdown(self.cleanup) self.bridge = CvBridge() self.size = 4 face_haar = 'haarcascade_frontalface_default.xml' self.haar_cascade = cv2.CascadeClassifier(face_haar) self.face_dir = 'face_data_fisher' self.face_name = sys.argv[1] self.path = os.path.join(self.face_dir, self.face_name) self.model = cv2.createFisherFaceRecognizer() # self.model = cv2.createEigenFaceRecognizer() self.cp_rate = 5 if not os.path.isdir(self.path): os.mkdir(self.path) self.count = 0 self.train_img_sub = rospy.Subscriber("/usb_cam/image_raw", Image, self.img_callback) # self.train_img_pub = rospy.Publisher('train_face', Image, queue_size=10) rospy.loginfo("Capturing data...")
def __init__(self): self.node_name = "face_recog_eigen" rospy.init_node(self.node_name) rospy.on_shutdown(self.cleanup) self.bridge = CvBridge() self.face_names = StringArray() self.size = 4 face_haar = 'haarcascade_frontalface_default.xml' self.haar_cascade = cv2.CascadeClassifier(face_haar) self.face_dir = 'face_data_eigen' # self.model = cv2.createFisherFaceRecognizer() self.model = cv2.createEigenFaceRecognizer() (self.im_width, self.im_height) = (112, 92) rospy.loginfo("Loading data...") # self.fisher_train_data() self.load_trained_data() rospy.sleep(3) # self.img_sub = rospy.Subscriber("/asus/rgb/image_raw", Image, self.img_callback) self.img_sub = rospy.Subscriber("/usb_cam/image_raw", Image, self.img_callback) # self.img_pub = rospy.Publisher('face_img', Image, queue_size=10) self.name_pub = rospy.Publisher('face_names', StringArray, queue_size=10) self.all_names_pub = rospy.Publisher('all_face_names', StringArray, queue_size=10) rospy.loginfo("Detecting faces...")
def depth_callback(self, data):
    try:
        self.depth_image = self.br.imgmsg_to_cv2(data, desired_encoding="passthrough")
    except CvBridgeError as e:
        print(e)
    # print "depth"
    depth_min = np.nanmin(self.depth_image)
    depth_max = np.nanmax(self.depth_image)

    depth_img = self.depth_image.copy()
    depth_img[np.isnan(self.depth_image)] = depth_min
    depth_img = ((depth_img - depth_min) / (depth_max - depth_min) * 255).astype(np.uint8)
    cv2.imshow("Depth Image", depth_img)
    cv2.waitKey(5)

    # stream = open("/home/chentao/depth_test.yaml", "w")
    # data = {'img': depth_img.tolist()}
    # yaml.dump(data, stream)
def rgb_callback(self, data):
    try:
        self.rgb_image = self.br.imgmsg_to_cv2(data, "bgr8")
    except CvBridgeError as e:
        print(e)

    if self.drawing or self.rect_done:
        if self.ix1 != -1 and self.iy1 != -1 and self.ix2 != -1 and self.iy2 != -1:
            cv2.rectangle(self.rgb_image, (self.ix1, self.iy1), (self.ix2, self.iy2), (0, 255, 0), 2)
            if self.rect_done:
                center_point = self.get_center_point()
                cv2.circle(self.rgb_image, tuple(center_point.astype(int)), 3, (0, 0, 255), -1)
    cv2.imshow('RGB Image', self.rgb_image)
    cv2.waitKey(5)
    # print "rgb"
def rgb_callback(self, data):
    try:
        self.rgb_image = self.br.imgmsg_to_cv2(data, "bgr8")
    except CvBridgeError as e:
        print(e)

    tempimg = self.rgb_image.copy()
    if self.drawing or self.rect_done:
        if self.ix1 != -1 and self.iy1 != -1 and self.ix2 != -1 and self.iy2 != -1:
            cv2.rectangle(tempimg, (self.ix1, self.iy1), (self.ix2, self.iy2), (0, 255, 0), 2)
            if self.rect_done:
                center_point = self.get_center_point()
                cv2.circle(tempimg, tuple(center_point.astype(int)), 3, (0, 0, 255), -1)
    cv2.imshow('RGB Image', tempimg)
    cv2.waitKey(5)
    # print "rgb"
def depth_callback(self, data):
    try:
        self.depth_image = self.br.imgmsg_to_cv2(data, desired_encoding="passthrough")
    except CvBridgeError as e:
        print(e)
    # print "depth"
    depth_min = np.nanmin(self.depth_image)
    depth_max = np.nanmax(self.depth_image)

    depth_img = self.depth_image.copy()
    depth_img[np.isnan(self.depth_image)] = depth_min
    depth_img = ((depth_img - depth_min) / (depth_max - depth_min) * 255).astype(np.uint8)
    cv2.imshow("Depth Image", depth_img)
    cv2.waitKey(5)
def __init__(self):
    self.bridge = cv_bridge.CvBridge()
    cv2.namedWindow("input", 1)
    cv2.createTrackbar('param1', 'input', 10, 300, nothing)
    cv2.createTrackbar('param2', 'input', 15, 300, nothing)
    cv2.namedWindow("processed", 1)

    self.image_sb = rospy.Subscriber('/usb_cam/image_raw', Image, self.image_callback)
    self.motion = Twist()
    rate = rospy.Rate(20)
    # publish to cmd_vel of the jackal
    pub = rospy.Publisher("/jackal_velocity_controller/cmd_vel", Twist, queue_size=10)
    while not rospy.is_shutdown():
        # publish Twist
        pub.publish(self.motion)
        rate.sleep()
def grabImage(self, camera_name, filename):
    """
    Grabs exactly one image from a camera

    :param camera_name: The name of the camera whose image should be saved
    :type camera_name: str
    :param filename: The full path of the filename where this image should be saved
    :type filename: str
    """
    if self.open(camera_name) != 0:
        return
    msg = rospy.wait_for_message('/cameras/' + camera_name + "/image", Image)
    img = cv_bridge.CvBridge().imgmsg_to_cv2(msg, "bgr8")
    cv2.imwrite(filename, img)
    rospy.loginfo("Saved Image %s" % filename)
    self.close(camera_name)
def contour_match(self, img):
    '''
    Returns 1. Image with bounding boxes added
    '''
    # get filtered contours
    contours = self.get_filtered_contours(img)

    detection = Detection()
    height, width, channel = img.shape
    mean_color = (15, 253, 250)
    for i, (cnt, box) in enumerate(contours):
        # plot box and label around contour
        x, y, w, h = box
        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.putText(img, "cone", (x, y), font, 1, mean_color, 4)
        cv2.rectangle(img, (x, y), (x + w, y + h), mean_color, 2)
        if i == 0:
            detection.x = x
            detection.y = y
            detection.w = w
            detection.h = h
            detection.error_center = 0.5 - (x / float(width))
            detection.error_size = (self.DESIRED_AREA - w * h) / float(width * height)
            cv2.putText(img, "center:%.2f, distance: %.2f" % (detection.error_center, detection.error_size),
                        (x - w, y - h / 2), font, 1, mean_color, 4)

    # return the image with boxes around detected contours
    return img, detection
def __init__(self):
    self.detector = Detector()
    self.sub_image = rospy.Subscriber("/camera/rgb/image_rect_color", Image, self.processImage, queue_size=1)
    self.pub_image = rospy.Publisher("detection_image", Image, queue_size=1)
    self.bridge = CvBridge()
    rospy.loginfo("Object Detector Initialized.")

    self.drive_cmd = {'steer': 0, 'speed': 0}

    self.pub_detection = rospy.Publisher("object_detection", Detection, queue_size=1)
    # self.pub = rospy.Publisher("/vesc/ackermann_cmd_mux/input/navigation", AckermannDriveStamped, queue_size=1)
    # self.thread = Thread(target=self.drive)
    # self.thread.start()
    rospy.loginfo("Detector initialized")
def __init__(self):
    global et
    global recognizer
    global dictid, labels
    self.engine1 = pyttsx.init()
    self.engine = pyttsx.init()
    self.engine1.say('Initializing components. All systems ready.')
    self.engine1.runAndWait()

    self.bridge = CvBridge()
    self.image_sub = rospy.Subscriber("/camera/rgb/image_raw", Image, self.callback)
    recognizer = cv2.face.createLBPHFaceRecognizer()
    et = libs.EyeTracker("cascades/haarcascade_frontalface_alt2.xml")
    i = 0
    path = 'faces'
    images, labels, dictid = libs.read_data(path)
    print labels
    recognizer.train(images, np.array(labels))
    print 'termine'
    self.num = 0
def __init__(self):
    self.bridge = CvBridge()  # allows us to convert our image to cv2
    self.zed_pub = rsp.Publisher("/image_echo", Image, queue_size=1)
    self.imginfo_pub = rsp.Publisher("/exploring_challenge", img_info, queue_size=1)
    self.zed_img = rsp.Subscriber("/camera/rgb/image_rect_color", Image, self.detect_img)  # subscribes to the ZED camera image
    self.odom_sub = rsp.Subscriber("/vesc/odom", Odometry, self.odom_callback)
    self.last_arb_position = Point()
    self.gone_far_enough = True
    self.header = std_msgs.msg.Header()
    self.heightThresh = 75  # unit pixels MUST TWEAK
    self.odomThresh = 1  # unit meters
    self.blob_msg = img_info()
    rsp.init_node("vision_node")
def __init__(self):
    self.bridge = CvBridge()
    # initializes subscriber for Baxter's left hand camera image topic
    self.image_sub = rospy.Subscriber("/cameras/left_hand_camera/image", Image, self.find_cups)
    # initializes subscriber for the location of the treasure (published by the find_treasure node)
    self.treasure_location_sub = rospy.Subscriber("/treasure_location", Treasure, self.find_treasure)
    # initializes publisher to publish the location of the cup containing the treasure
    self.treasure_cup_pub = rospy.Publisher("treasure_cup_location", Point, queue_size=10)
    # initializes publisher to publish the processed image to Baxter's display
    self.image_tracking_pub = rospy.Publisher("/robot/xdisplay", Image, queue_size=10)
    self.treasure_cup_location = Point()
    self.cups = []
    self.cupCenters = [[0, 0], [0, 0], [0, 0]]
    self.wasPreviouslyTrue = False
    self.flag = False
    self.minRadius = 10
    for i in range(0, 3):
        self.cups.append(cup())

# determines the location of the 3 red cups (callback for the image topic subscriber)
def callback(data): """Callback function for subscribing to an Image topic and creating a buffer """ global dtype global images_so_far # Get cv image (which is a numpy array) from data cv_image = bridge.imgmsg_to_cv2(data) # Save dtype before we float32-ify it dtype = str(cv_image.dtype) # Insert and roll buffer buffer.insert(0, (np.asarray(cv_image, dtype='float32'), rospy.get_time())) if(len(buffer) > bufsize): buffer.pop() # for showing framerate images_so_far += 1 # Initialize subscriber with our callback
def make_mask(limb, filename):
    """
    Given a limb (right or left) and a name to save to
    (in the baxter_tools/share/images/ directory), create a mask
    of any dark objects in the image from the camera and save it.
    """
    image_sub = rospy.Subscriber('/cameras/' + limb + '_hand_camera/image', Image, callback)
    try:
        bridge = CvBridge()
        # 'img' is presumably a global set by the image subscriber's callback
        cv_image = bridge.imgmsg_to_cv2(img, "bgr8")
    except CvBridgeError, e:
        print e
    msk = cv2.threshold(img, 250, 255, cv2.THRESH_BINARY_INV)
    return msk
def __init__(self, box_features, camera_channel='/camera_crop/image_rect_color',
             bbox_channel='/objectattention/bbox', feat_channel='/objectattention/features',
             similarity_channel='/objectattention/similarity'):
    """ weights should be a dictionary with variable names as keys and weights as values """
    print "start"
    self._camera_channel = camera_channel
    self.curr_img = None
    self.fps = None
    print "session"
    self.init_model(box_features)
    self.temp = 1
    self.bbox_publisher = rospy.Publisher(bbox_channel, Float64MultiArray, queue_size=1)
    self.feat_publisher = rospy.Publisher(feat_channel, Float64MultiArray, queue_size=1)
    self.similarity_publisher = rospy.Publisher(similarity_channel, Float64MultiArray, queue_size=1)
    self.image_subscriber = rospy.Subscriber(camera_channel, Image, self._process_image)
    self.msg = None
    rospy.init_node('image_proc', anonymous=True)
def __init__(self):
    self.imgprocess = ImageProcessor.ImageProcessor()
    self.bridge = CvBridge()
    self.latestimage = None
    self.process = False

    """ROS Subscriptions """
    self.image_pub = rospy.Publisher("/image_converter/output_video", Image, queue_size=10)
    self.image_sub = rospy.Subscriber("/cam/camera_/image_raw", Image, self.cvt_image)
    self.cmdVelocityPub = rospy.Publisher('/platform_control/cmd_vel', Twist, queue_size=10)

    self.targetlane = 0
    self.cmdvel = Twist()
    self.last_time = rospy.Time()
    self.sim_time = rospy.Time()
    self.dt = 0
    self.position_er = 0
    self.position_er_last = 0
    self.cp = 0
    self.vel_er = 0
    self.cd = 0
    self.Kp = 3
    self.Kd = 3.5
def run(self):
    while True:
        # Only run loop if we have an image
        if self.process:
            self.imgprocess.ProcessingImage(self.latestimage)  # FormulaPi Image Processing Function
            self.AdjustMotorSpeed(self.imgprocess.trackoffset)  # Compute Motor Commands From Image Output

            # Publish Processed Image
            cvImage = self.imgprocess.outputImage
            try:
                imgmsg = self.bridge.cv2_to_imgmsg(cvImage, "bgr8")
                self.image_pub.publish(imgmsg)
            except CvBridgeError as e:
                print(e)

            # cv2.imshow("Image window", self.cvImage)
            # cv2.waitKey(3)
def handle_image_msg(msg):
    assert msg._type == 'sensor_msgs/Image'

    with g_fusion_lock:
        g_fusion.filter(EmptyObservation(msg.header.stamp.to_sec()))

        if g_fusion.last_state_mean is not None:
            pose = g_fusion.lidar_observation_function(g_fusion.last_state_mean)

            yaw_q = ros_tf.transformations.quaternion_from_euler(0, 0, pose[3])
            br = ros_tf.TransformBroadcaster()
            br.sendTransform(tuple(pose[:3]), tuple(yaw_q), rospy.Time.now(), 'obj_fuse_centroid', 'velodyne')

            if last_known_box_size is not None:
                # bounding box
                marker = Marker()
                marker.header.frame_id = "obj_fuse_centroid"
                marker.header.stamp = rospy.Time.now()
                marker.type = Marker.CUBE
                marker.action = Marker.ADD
                marker.scale.x = last_known_box_size[2]
                marker.scale.y = last_known_box_size[1]
                marker.scale.z = last_known_box_size[0]
                marker.color = ColorRGBA(r=0., g=1., b=0., a=0.5)
                marker.lifetime = rospy.Duration()
                pub = rospy.Publisher("obj_fuse_bbox", Marker, queue_size=10)
                pub.publish(marker)
def __init__(self, topic):
    self.topic = topic
    self.windowNameOrig = "Camera: {0}".format(self.topic)
    cv2.namedWindow(self.windowNameOrig, 2)
    self.bridge = CvBridge()
    self.image_sub = rospy.Subscriber(self.topic, Image, self.callback)
def __init__(self):
    self.data_dir = rospy.get_param('/store_data/data_dir')
    self.category = rospy.get_param('/store_data/category')
    self.pic_count = rospy.get_param('/store_data/init_idx')
    self.rate = 1 / float(rospy.get_param('/store_data/rate'))

    self.bridge = CvBridge()
    self.image_sub = rospy.Subscriber('/camera/rgb/image_color', Image, self.img_cb)
    self.patches_sub = rospy.Subscriber('/candidate_regions_depth', PolygonStamped, self.patches_cb)
    self.capture_sub = rospy.Subscriber('/capture/keyboard', Bool, self.capture_cb)
    # you can read this value off of your sensor from the '/camera/depth_registered/camera_info' topic
    self.P = np.array([[525.0, 0.0, 319.5, 0.0],
                       [0.0, 525.0, 239.5, 0.0],
                       [0.0, 0.0, 1.0, 0.0]])
    self.capture = False
def __init__(self): """ Constructor """ self._image_pub = rospy.Publisher('/robot/head_display', Image, latch=True, queue_size=10)
def display_image(self, image_path, display_in_loop=False, display_rate=1.0):
    """
    Displays image(s) to robot's head

    @type image_path: list
    @param image_path: the relative or absolute file path to the image file(s)

    @type display_in_loop: bool
    @param display_in_loop: Set True to loop through all image files infinitely

    @type display_rate: float
    @param display_rate: the rate to publish image into head
    """
    rospy.logdebug("Display images in loop:'{0}', frequency: '{1}'".format(display_in_loop, display_rate))

    image_msg = []
    image_list = image_path if isinstance(image_path, list) else [image_path]
    for one_path in image_list:
        cv_img = self._setup_image(one_path)
        if cv_img:
            image_msg.append(cv_img)

    if not image_msg:
        rospy.logerr("Image message list is empty")
    else:
        r = rospy.Rate(display_rate)
        while not rospy.is_shutdown():
            for one_msg in image_msg:
                self._image_pub.publish(one_msg)
                r.sleep()
            if not display_in_loop:
                break
def set_callback(self, camera_name, callback, callback_args=None,
                 queue_size=10, rectify_image=True):
    """
    Setup the callback function to show image.

    @type camera_name: str
    @param camera_name: camera name
    @type callback: fn(msg, cb_args)
    @param callback: function to call when data is received
    @type callback_args: any
    @param callback_args: additional arguments to pass to the callback
    @type queue_size: int
    @param queue_size: maximum number of messages to receive at a time
    @type rectify_image: bool
    @param rectify_image: specify whether subscribe to the rectified or raw (unrectified) image topic
    """
    if self.verify_camera_exists(camera_name):
        if rectify_image == True:
            if self.cameras_io[camera_name]['is_color']:
                image_string = "image_rect_color"
            else:
                image_string = "image_rect"
        else:
            image_string = "image_raw"
        rospy.Subscriber('/'.join(["/io/internal_camera", camera_name, image_string]),
                         Image, callback, callback_args=callback_args)
def ImgCcallback(self, ros_img):
    global imgi
    imgi = mx.img.imdecode(ros_img.data)
    global img_rect
    global img_raw
    global Frame
    img_rect = CvBridge().compressed_imgmsg_to_cv2(ros_img)
    # img_raw = img_rect.copy()
    Frame = pv.Image(img_rect.copy())
    # cv2.imshow("tester", Frame.asOpenCV())

# def Imgcallback(self, ros_img):
#     global img_rect
#     img_rect = CvBridge().imgmsg_to_cv2(ros_img)
def __init__(self, context):
    """
    :param context: QT context, aka parent
    """
    super(FolderImagePublisherPlugin, self).__init__(context)

    # Widget setup
    self.setObjectName('FolderImagePublisherPlugin')

    self._widget = QWidget()
    context.add_widget(self._widget)

    # Layout and attach to widget
    layout = QHBoxLayout()
    self._widget.setLayout(layout)

    self._info = QLineEdit()
    self._info.setDisabled(True)
    self._info.setText("Please choose a directory (top-right corner)")
    layout.addWidget(self._info)

    self._left_button = QPushButton('<')
    self._left_button.clicked.connect(partial(self._rotate_and_publish, -1))
    layout.addWidget(self._left_button)

    self._right_button = QPushButton('>')
    self._right_button.clicked.connect(partial(self._rotate_and_publish, 1))
    layout.addWidget(self._right_button)

    # Set subscriber and service to None
    self._pub = rospy.Publisher("folder_image", Image, queue_size=1)
    self._bridge = CvBridge()

    self._files = collections.deque([])
def store_image(self, roi_image):
    """
    Store the image
    :param roi_image: Image we would like to store
    """
    if roi_image is not None and self.label is not None and self.output_directory is not None:
        image_writer.write_annotated(self.output_directory, roi_image, self.label, True)
def _image_callback(self, msg):
    """
    Called when a new sensor_msgs/Image is coming in
    :param msg: The image message
    """
    try:
        cv_image = self.bridge.imgmsg_to_cv2(msg, "bgr8")
    except CvBridgeError as e:
        rospy.logerr(e)

    self._image_widget.set_image(cv_image)
def _create_subscriber(self, topic_name):
    """
    Method that creates a subscriber to a sensor_msgs/Image topic
    :param topic_name: The topic_name
    """
    if self._sub:
        self._sub.unregister()
    self._sub = rospy.Subscriber(topic_name, Image, self._image_callback)
    rospy.loginfo("Listening to %s -- spinning .." % self._sub.name)
    self._widget.setWindowTitle("Label plugin, listening to (%s)" % self._sub.name)
def _create_subscriber(self, topic_name):
    """
    Method that creates a subscriber to a sensor_msgs/Image topic
    :param topic_name: The topic_name
    """
    if self._sub:
        self._sub.unregister()
    self._sub = rospy.Subscriber(topic_name, Image, self._image_callback)
    rospy.loginfo("Listening to %s -- spinning .." % self._sub.name)
    self._widget.setWindowTitle("Test plugin, listening to (%s)" % self._sub.name)