The following 39 code examples, extracted from open source Python projects, demonstrate how to use tf.TransformBroadcaster().
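All of the examples share the same core pattern: construct a single TransformBroadcaster and periodically call its sendTransform(translation, rotation, time, child_frame, parent_frame) method, where translation is an (x, y, z) tuple in meters and rotation is an (x, y, z, w) quaternion. Here is a minimal self-contained sketch of that pattern; the node name and frame names are illustrative, not taken from any of the projects below:

import rospy
import tf

if __name__ == '__main__':
    rospy.init_node('tf_broadcaster_demo')  # illustrative node name
    br = tf.TransformBroadcaster()
    rate = rospy.Rate(10.0)
    while not rospy.is_shutdown():
        # broadcast a fixed offset from 'world' to 'child_frame' at 10 Hz
        br.sendTransform((1.0, 0.0, 0.5),  # translation (x, y, z) in meters
                         tf.transformations.quaternion_from_euler(0, 0, 0),  # (x, y, z, w) quaternion
                         rospy.Time.now(),
                         'child_frame',    # child frame (illustrative)
                         'world')          # parent frame (illustrative)
        rate.sleep()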
def __init__(self):
    # first let's load all parameters:
    self.frame_id = rospy.get_param("~frame_id", "odom_meas")
    self.camera_frame_id = rospy.get_param("~camera_frame_id", "overhead_cam_frame")
    self.base_frame_id = rospy.get_param("~base_frame_id", "base_meas")
    self.x0 = rospy.get_param("~x0", 0.0)
    self.y0 = rospy.get_param("~y0", 0.0)
    self.th0 = rospy.get_param("~th0", 0.0)
    self.pubstate = rospy.get_param("~pubstate", True)
    self.marker_id = rospy.get_param("~marker_id", 12)
    # setup other required vars:
    self.odom_offset = odom_conversions.numpy_to_odom(np.array([self.x0, self.y0, self.th0]), self.frame_id)
    self.path_list = deque([], maxlen=PATH_LEN)
    # now let's create publishers, listeners, and subscribers
    self.br = tf.TransformBroadcaster()
    self.listener = tf.TransformListener()
    self.meas_pub = rospy.Publisher("meas_pose", Odometry, queue_size=5)
    self.path_pub = rospy.Publisher("meas_path", Path, queue_size=1)
    self.alvar_sub = rospy.Subscriber("ar_pose_marker", AlvarMarkers, self.alvarcb)
    self.publish_serv = rospy.Service("publish_bool", SetBool, self.pub_bool_srv_cb)
    self.offset_serv = rospy.Service("set_offset", SetOdomOffset, self.offset_srv_cb)
    return
def __init__(self): """Initialize tracker. """ self._read_configuration() self.estimates = {} self.estimate_times = {} self.ikf_prev_outlier_flags = {} self.ikf_outlier_counts = {} self.outlier_thresholds = {} rospy.loginfo("Receiving raw multi-range messages from: {}".format(self.uwb_multi_range_topic)) rospy.loginfo("Publishing tracker messages to {}".format(self.uwb_tracker_topic)) rospy.loginfo("Publishing tracker transform as {} -> {}".format(self.tracker_frame, self.target_frame)) # ROS publishers and subscribers self.tracker_frame = self.tracker_frame self.target_frame = self.target_frame self.uwb_pub = rospy.Publisher(self.uwb_tracker_topic, uwb.msg.UWBTracker, queue_size=1) self.tf_broadcaster = tf.TransformBroadcaster() self.uwb_multi_range_sub = rospy.Subscriber(self.uwb_multi_range_topic, uwb.msg.UWBMultiRangeWithOffsets, self.handle_multi_range_message)
def __init__(self):
    self.listen = tf.TransformListener()
    self.broadcast = tf.TransformBroadcaster()
    # use distinct attributes so one subscriber handle does not overwrite another
    self.pick_frame_sub = rospy.Subscriber("/pick_frame_name", String, self.set_pick_frame, queue_size=1)
    self.place_frame_sub = rospy.Subscriber("/place_frame_name", String, self.set_place_frame, queue_size=1)
    self.num_objects_sub = rospy.Subscriber("/num_objects", Int64, self.broadcast_frames, queue_size=1)
    self.place_frame = ''
    self.pick_frame = ''
    self.tower_size = 1
    self.tower_size_sub = rospy.Subscriber("/tower_size", Int64, self.set_tower_size)
def __init__(self):
    self.marker_id = rospy.get_param("~marker_id", 12)
    self.frame_id = rospy.get_param("~frame_id", "odom_meas")
    self.camera_frame_id = rospy.get_param("~camera_frame_id", "overhead_cam_frame")
    self.count = rospy.get_param("~count", 50)
    # local vars:
    self.calibrate_flag = False
    self.calibrated = False
    self.calibrate_count = 0
    self.kb = kbhit.KBHit()
    self.trans_arr = np.zeros((self.count, 3))
    self.quat_arr = np.zeros((self.count, 4))
    self.trans_ave = np.zeros(3)
    self.quat_ave = np.zeros(4)  # a quaternion average has four components
    # now create subscribers, timers, and publishers
    self.br = tf.TransformBroadcaster()
    self.kb_timer = rospy.Timer(rospy.Duration(0.1), self.keycb)
    rospy.on_shutdown(self.kb.set_normal_term)
    self.alvar_sub = rospy.Subscriber("ar_pose_marker", AlvarMarkers, self.alvarcb)
    return
def _init_pubsub(self):
    self.joint_states_pub = rospy.Publisher('joint_states', JointState)
    self.odom_pub = rospy.Publisher('odom', Odometry)
    self.sensor_state_pub = rospy.Publisher('~sensor_state', TurtlebotSensorState)
    self.operating_mode_srv = rospy.Service('~set_operation_mode', SetTurtlebotMode, self.set_operation_mode)
    self.digital_output_srv = rospy.Service('~set_digital_outputs', SetDigitalOutputs, self.set_digital_outputs)
    if self.drive_mode == 'twist':
        self.cmd_vel_sub = rospy.Subscriber('cmd_vel', Twist, self.cmd_vel)
        self.drive_cmd = self.robot.direct_drive
    elif self.drive_mode == 'drive':
        self.cmd_vel_sub = rospy.Subscriber('cmd_vel', Drive, self.cmd_vel)
        self.drive_cmd = self.robot.drive
    elif self.drive_mode == 'turtle':
        self.cmd_vel_sub = rospy.Subscriber('cmd_vel', Turtle, self.cmd_vel)
        self.drive_cmd = self.robot.direct_drive
    else:
        rospy.logerr("unknown drive mode: %s" % self.drive_mode)
    self.transform_broadcaster = None
    if self.publish_tf:
        self.transform_broadcaster = tf.TransformBroadcaster()
def handle_image_msg(msg):
    assert msg._type == 'sensor_msgs/Image'

    with g_fusion_lock:
        g_fusion.filter(EmptyObservation(msg.header.stamp.to_sec()))

        if g_fusion.last_state_mean is not None:
            pose = g_fusion.lidar_observation_function(g_fusion.last_state_mean)

            yaw_q = ros_tf.transformations.quaternion_from_euler(0, 0, pose[3])
            br = ros_tf.TransformBroadcaster()
            br.sendTransform(tuple(pose[:3]), tuple(yaw_q), rospy.Time.now(), 'obj_fuse_centroid', 'velodyne')

            if last_known_box_size is not None:
                # bounding box
                marker = Marker()
                marker.header.frame_id = "obj_fuse_centroid"
                marker.header.stamp = rospy.Time.now()
                marker.type = Marker.CUBE
                marker.action = Marker.ADD
                marker.scale.x = last_known_box_size[2]
                marker.scale.y = last_known_box_size[1]
                marker.scale.z = last_known_box_size[0]
                marker.color = ColorRGBA(r=0., g=1., b=0., a=0.5)
                marker.lifetime = rospy.Duration()
                # note: creating a publisher inside the callback is wasteful, and
                # a message published immediately after registration may be dropped
                pub = rospy.Publisher("obj_fuse_bbox", Marker, queue_size=10)
                pub.publish(marker)
def __init__(self, name):
    self.robotname = rospy.get_param("~robot")
    self.br = tf.TransformBroadcaster()
    self.sub = rospy.Subscriber('/%s/base_pose_ground_truth' % self.robotname, Odometry, self.handle_robot_pose, self.robotname)
def broadcast_position(pose, to_frame, from_frame):
    broadcaster = tf.TransformBroadcaster()
    broadcaster.sendTransform(
        (pose.position.x, pose.position.y, pose.position.z),
        (pose.orientation.x, pose.orientation.y, pose.orientation.z, pose.orientation.w),
        rospy.Time.now(),
        to_frame,
        from_frame
    )
def publish_transform(name, pos):
    broadcast = tf.TransformBroadcaster()
    while True:
        broadcast.sendTransform(tuple(pos), [0, 0, 0, 1], rospy.Time.now(), name, "root")
        rospy.sleep(1)
def __init__(self, topic='/sensor_values'):
    self.bx = SmartBaxter('left')
    self.br = tf.TransformBroadcaster()
    self.tl = tf.TransformListener()
def __init__(self):
    self.tfBuffer = tf2_ros.Buffer()
    self.listen = tf2_ros.TransformListener(self.tfBuffer)
    self.listener = tf.TransformListener()
    self.broadcast = tf.TransformBroadcaster()
def __init__(self): self.listen = tf.TransformListener() self.broadcast = tf.TransformBroadcaster() self.obj_pose_sub = rospy.Subscriber("/num_objects", Int64, self.broadcast_frame, queue_size=1)
def __init__(self):
    super(PerceptionNode, self).__init__('p_node')
    self.transition_table = {
        # If calibration has already happened, allow skipping it
        'initial': {'q': self._perceive},
        'perceive': {'q': self._post_perceive},
    }
    # Hardcoded place for now
    self.tl = tf.TransformListener()
    self.num_objects = 0
    # Would this work too? Both tf and tf2 have (c) 2008...
    # self.tf2 = tf2_ros.TransformListener()
    self.n_objects_sub = rospy.Subscriber("/num_objects", Int64, self.update_num_objects, queue_size=1)
    self.perception_pub = rospy.Publisher("/perception/enabled", Bool, queue_size=1)
    self.br = tf.TransformBroadcaster()
    # Dict to map imarker names and their updated poses
    self.int_markers = {}
    # Ideally this call would be in a Factory/Metaclass/Parent
    self.show_options()
def __init__(self, frequency):
    self.tag_names = rospy.get_param("tag_names")
    self.frame_id = rospy.get_param("~frame_id")
    self.transforms = rospy.get_param("tags")
    self.rate = rospy.Rate(frequency)
    # self.sub = rospy.Subscriber(self.tag_position_topic, PoseArray, self.tag_position_cb)
    self.br = tf.TransformBroadcaster()
    self.make_transforms()
def __init__(self): """Set up class variables, initialize broadcaster and start subscribers.""" # Create broadcasters self.br_head = tf.TransformBroadcaster() self.br_arm = tf.TransformBroadcaster() # Create subscribers rospy.Subscriber("bax_head/pose", PoseStamped, self.head_callback, queue_size=1) rospy.Subscriber("bax_arm/pose", PoseStamped, self.arm_callback, queue_size=1) # Main while loop. rate = rospy.Rate(50) while not rospy.is_shutdown(): rate.sleep()
def __init__(self): """Set up class variables, initialize broadcaster and start subscribers.""" # Create broadcasters self.br_head = tf.TransformBroadcaster() self.br_rods = tf.TransformBroadcaster() # Create subscribers rospy.Subscriber("bax_head/pose", PoseStamped, self.head_callback, queue_size=1) rospy.Subscriber("rods/pose", PoseStamped, self.rod_callback, queue_size=1) # Main while loop. rate = rospy.Rate(50) while not rospy.is_shutdown(): rate.sleep()
def __init__(self):
    self.rate = rospy.get_param('~rate', 10.0)  # the rate at which to publish the transform
    self.submodelsToBeIgnored = rospy.get_param('~ignore_submodels_of', '').split(';')
    self.tfBroadcaster = tf.TransformBroadcaster()
    self.link_states_msg = None
    self.model_states_msg = None
    self.model_cache = {}
    self.updatePeriod = 1. / self.rate
    self.enable_publisher_marker = False
    self.enable_publisher_tf = False
    self.markerPub = rospy.Publisher('/model_marker', Marker, queue_size=10)
    self.modelStatesSub = rospy.Subscriber('gazebo/model_states', ModelStates, self.on_model_states_msg, queue_size=1)
    self.linkStatesSub = rospy.Subscriber('gazebo/link_states', LinkStates, self.on_link_states_msg, queue_size=1)
    rate_sleep = rospy.Rate(self.rate)
    # Main while loop
    while not rospy.is_shutdown():
        if self.enable_publisher_marker and self.enable_publisher_tf:
            self.publish_tf()
            self.publish_marker()
        rate_sleep.sleep()
def __init__(self):
    # initialize ROS node and transform publisher
    rospy.init_node('crazyflie_detector', anonymous=True)
    self.pub_tf = tf.TransformBroadcaster()
    self.rate = rospy.Rate(50.0)  # publish transform at 50 Hz
    # initialize values for crazyflie location on Kinect v2 image
    self.cf_u = 0  # u is pixels left(0) to right(+)
    self.cf_v = 0  # v is pixels top(0) to bottom(+)
    self.cf_d = 0  # d is distance camera(0) to crazyflie(+) from depth image
    self.last_d = 0  # last non-zero depth measurement
    # crazyflie orientation to Kinect v2 image (Euler)
    self.r = -1.5708
    self.p = 0
    self.y = -3.1415
    # Convert image from a ROS image message to a CV image
    self.bridge = CvBridge()
    cv2.namedWindow("KinectV2", 1)
    # Wait for the camera_info topic to become available
    rospy.wait_for_message('/kinect2/qhd/camera_info', CameraInfo)
    # Subscribe to Kinect v2 qhd camera_info to get image frame height and width
    rospy.Subscriber('/kinect2/qhd/camera_info', CameraInfo, self.camera_data, queue_size=1)
    # Subscribe to registered color and depth images
    rospy.Subscriber('/kinect2/qhd/image_color_rect', Image, self.image_callback, queue_size=1)
    rospy.Subscriber('/kinect2/qhd/image_depth_rect', Image, self.depth_callback, queue_size=1)
    self.rate.sleep()  # suspend until next cycle

# This callback function sets parameters regarding the camera.
def __init__(self):
    # ATF code
    self.atf = ATF()
    # native app code
    self.pub_freq = 20.0  # Hz
    self.br = tf.TransformBroadcaster()
    rospy.sleep(1)  # wait for tf broadcaster to get active (rospy bug?)
def publish_odom(self, cur_x, cur_y, cur_theta, vx, vth):
    quat = tf.transformations.quaternion_from_euler(0, 0, cur_theta)
    current_time = rospy.Time.now()

    br = tf.TransformBroadcaster()
    br.sendTransform((cur_x, cur_y, 0),
                     quat,  # reuse the quaternion computed above
                     current_time,
                     "base_link",
                     "odom")

    odom = Odometry()
    odom.header.stamp = current_time
    odom.header.frame_id = 'odom'
    odom.pose.pose.position.x = cur_x
    odom.pose.pose.position.y = cur_y
    odom.pose.pose.position.z = 0.0
    odom.pose.pose.orientation = Quaternion(*quat)
    odom.pose.covariance[0] = 0.01
    odom.pose.covariance[7] = 0.01
    odom.pose.covariance[14] = 99999
    odom.pose.covariance[21] = 99999
    odom.pose.covariance[28] = 99999
    odom.pose.covariance[35] = 0.01
    odom.child_frame_id = 'base_link'
    odom.twist.twist.linear.x = vx
    odom.twist.twist.linear.y = 0
    odom.twist.twist.angular.z = vth
    odom.twist.covariance = odom.pose.covariance

    self.odom_pub.publish(odom)
def handle_msg_car(msg, who):
    assert isinstance(msg, Odometry)
    global last_cap_r, last_cap_f, last_cap_yaw

    if who == 'cap_r':
        last_cap_r = rtk_position_to_numpy(msg)
    elif who == 'cap_f' and last_cap_r is not None:
        cap_f = rtk_position_to_numpy(msg)
        cap_r = last_cap_r

        last_cap_f = cap_f
        last_cap_yaw = get_yaw(cap_f, cap_r)
    elif who == 'obs_r' and last_cap_f is not None and last_cap_yaw is not None:
        md = None
        for obs in metadata:
            if obs['obstacle_name'] == 'obs1':
                md = obs
        assert md, 'obs1 metadata not found'

        # find obstacle rear RTK to centroid vector
        lrg_to_gps = [md['rear_gps_l'], -md['rear_gps_w'], md['rear_gps_h']]
        lrg_to_centroid = [md['l'] / 2., -md['w'] / 2., md['h'] / 2.]
        obs_r_to_centroid = np.subtract(lrg_to_centroid, lrg_to_gps)

        # in the fixed GPS frame
        cap_f = last_cap_f
        obs_r = rtk_position_to_numpy(msg)

        # in the capture vehicle front RTK frame
        cap_to_obs = np.dot(rotMatZ(-last_cap_yaw), obs_r - cap_f)
        cap_to_obs_centroid = cap_to_obs + obs_r_to_centroid

        br = tf.TransformBroadcaster()
        br.sendTransform(tuple(cap_to_obs_centroid), (0, 0, 0, 1), rospy.Time.now(), 'obs_centroid', 'gps_antenna_front')

        # publish obstacle bounding box
        marker = Marker()
        marker.header.frame_id = "obs_centroid"
        marker.header.stamp = rospy.Time.now()
        marker.type = Marker.CUBE
        marker.action = Marker.ADD
        marker.scale.x = md['l']
        marker.scale.y = md['w']
        marker.scale.z = md['h']
        marker.color.r = 0.2
        marker.color.g = 0.5
        marker.color.b = 0.2
        marker.color.a = 0.5
        marker.lifetime = rospy.Duration()
        pub = rospy.Publisher("obs_bbox", Marker, queue_size=10)
        pub.publish(marker)
def handle_radar_msg(msg, dont_fuse):
    assert msg._md5sum == '6a2de2f790cb8bb0e149d45d297462f8'
    publish_rviz_topics = True

    with g_fusion_lock:
        # do we have any estimation?
        if g_fusion.last_state_mean is not None:
            pose = g_fusion.lidar_observation_function(g_fusion.last_state_mean)

            observations = RadarObservation.from_msg(msg, RADAR_TO_LIDAR, 0.9115)

            # find nearest observation to current object position estimation
            distance_threshold = 4.4
            nearest = None
            nearest_dist = 1e9
            for o in observations:
                dist = [o.x - pose[0], o.y - pose[1], o.z - pose[2]]
                dist = np.sqrt(np.array(dist).dot(dist))
                if dist < nearest_dist and dist < distance_threshold:
                    nearest_dist = dist
                    nearest = o

            if nearest is not None:
                # FUSION
                if not dont_fuse:
                    g_fusion.filter(nearest)

                if publish_rviz_topics:
                    header = Header()
                    header.frame_id = '/velodyne'
                    header.stamp = rospy.Time.now()
                    point = np.array([[nearest.x, nearest.y, nearest.z]], dtype=np.float32)
                    rospy.Publisher(name='obj_radar_nearest', data_class=PointCloud2, queue_size=1).publish(pc2.create_cloud_xyz32(header, point))

                    #pose = g_fusion.lidar_observation_function(g_fusion.last_state_mean)
                    #br = ros_tf.TransformBroadcaster()
                    #br.sendTransform(tuple(pose), (0,0,0,1), rospy.Time.now(), 'car_fuse_centroid', 'velodyne')

                    # if last_known_box_size is not None:
                    #     # bounding box
                    #     marker = Marker()
                    #     marker.header.frame_id = "car_fuse_centroid"
                    #     marker.header.stamp = rospy.Time.now()
                    #     marker.type = Marker.CUBE
                    #     marker.action = Marker.ADD
                    #     marker.scale.x = last_known_box_size[2]
                    #     marker.scale.y = last_known_box_size[1]
                    #     marker.scale.z = last_known_box_size[0]
                    #     marker.color = ColorRGBA(r=1., g=1., b=0., a=0.5)
                    #     marker.lifetime = rospy.Duration()
                    #     pub = rospy.Publisher("car_fuse_bbox", Marker, queue_size=10)
                    #     pub.publish(marker)
def move_cup(self):
    self.gripper.close()

    # random rotation between 20 and 180 degrees, converted to radians
    rotate = random.randrange(20, 180) * 3.14159 / 180
    print("rotate: " + str(rotate))

    t = self.tl.getLatestCommonTime("/root", "/right_gripper")
    cur_position, quaternion = self.tl.lookupTransform("/root", "/right_gripper", t)
    quaternion = [1, 0, 0, 0]
    requrd_rot = (0, 0, rotate)  # in radians
    requrd_trans = (0, 0, 0)
    rotated_pose = self.getOffsetPoses(cur_position, quaternion, requrd_rot, requrd_trans)
    trans_5 = tuple(rotated_pose[:3])
    quat_5 = tuple(rotated_pose[3:])
    br = tf.TransformBroadcaster()
    br.sendTransform(trans_5, quat_5, rospy.Time.now(), "pick_pose", "/root")
    pick_pose = PoseStamped(Header(0, rospy.Time(0), n.robot.base),
                            Pose(Point(*trans_5), Quaternion(*quat_5)))
    self.move_ik(pick_pose)
    self.gripper.open()

    t = self.tl.getLatestCommonTime("/root", "/right_gripper")
    cur_position, quaternion = self.tl.lookupTransform("/root", "/right_gripper", t)
    cur_position = list(cur_position)
    cur_position[2] = cur_position[2] + 0.08
    pose = Pose(Point(*cur_position), Quaternion(*quaternion))
    while True:
        try:
            stamped_pose = PoseStamped(Header(0, rospy.Time(0), n.robot.base), pose)
            self.move_ik(stamped_pose)
            break
        except AttributeError:
            print("can't find valid pose for gripper cause I'm dumb")
            return False
    rospy.sleep(2)
    home = PoseStamped(
        Header(0, rospy.Time(0), n.robot.base),
        Pose(Point(0.60558, -0.24686, 0.093535),
             Quaternion(0.99897, -0.034828, 0.027217, 0.010557)))
    self.move_ik(home)
    rospy.sleep(2)
def control_from_sensor_values(self):
    log_values = np.log(self.bx.inside)
    # Match which side is which. Ideally, if the sign of the diff
    # matches whether the gripper needs to move towards the
    # positive or negative part of the y axis in left_gripper.
    # That is, we need left side - right side (using left/right
    # like in l_gripper_{l, r}_finger_tip tfs)
    # We want to take log(values) for a better behaved controller
    inside_diff = log_values[7:] - log_values[:7]
    scalar_diff = sum(inside_diff) / len(inside_diff)

    # Take negative for one of the sides, so that angles should
    # match for a parallel object in the gripper
    l_angle, _ = np.polyfit(np.arange(7), log_values[:7], 1)
    r_angle, _ = np.polyfit(np.arange(7), -log_values[7:], 1)
    rospy.loginfo('Angle computed from l: {}'.format(np.rad2deg(l_angle)))
    rospy.loginfo('Angle computed from r: {}'.format(np.rad2deg(r_angle)))
    avg_angle = np.arctan((l_angle + r_angle) / 2.0)

    # Let's get a frame by the middle of the end effector
    # p = Point(0, 0, -0.05)
    p = (0, 0, -0.05)
    # Of course, tf uses (w, x, y, z), Quaternion uses x, y, z,
    # w. However, tf.TransformBroadcaster().sendTransform uses the
    # tf order.
    # q = Quaternion(q[1], q[2], q[3], q[0])
    q = tf.transformations.quaternion_about_axis(avg_angle, (1, 0, 0))

    # We had a lot of trouble sending a transform (p, q) from
    # left_gripper, and then asking for a point in the last frame
    # in the tf coordinate. Timing issues that I couldn't
    # solve. Instead, do it manually here:
    m1 = tf.transformations.quaternion_matrix(q)
    m1[:3, 3] = p
    p = (0, scalar_diff / 100, 0.05)
    m2 = np.eye(4)
    m2[:3, 3] = p
    m = m2.dot(m1)

    # Extract pose now
    p = Point(*m[:3, 3])
    q = Quaternion(*tf.transformations.quaternion_from_matrix(m))

    time = rospy.Time(0)
    h = Header()
    h.frame_id = 'left_gripper'
    h.stamp = time
    pose = Pose(p, q)
    new_endpose = self.tl.transformPose('base', PoseStamped(h, pose))
    self.bx.move_ik(new_endpose)
def __init__(self, robot, *robotargs):
    super(PickAndPlaceNode, self).__init__('pp_node')
    rospy.loginfo("PickAndPlaceNode")
    _post_perceive_trans = defaultdict(lambda: self._pick)
    _post_perceive_trans.update({'c': self._calibrate})
    _preplace = defaultdict(lambda: self._preplace)
    self.transition_table = {
        # If calibration has already happened, allow skipping it
        'initial': {'c': self._calibrate, 'q': self._perceive, 's': self._preplace},
        'calibrate': {'q': self._perceive, 'c': self._calibrate},
        'perceive': {'p': self._post_perceive, 'q': self._perceive, 's': self._stop_perceive, 'c': self._calibrate},
        'post_perceive': _post_perceive_trans,
        'postpick': {'1': self._level, '2': self._level, '9': self._level},
        'level': _preplace,
        'preplace': {'s': self._place},
        'place': {'q': self._perceive, 'c': self._calibrate}
    }
    rospy.loginfo("PickAndPlaceNode1")
    if callable(robot):
        self.robot = robot(*robotargs)
    else:
        self.robot = robot
    self.robot.level = 1
    rospy.loginfo("PickAndPlaceNode2")
    # Hardcoded place for now
    self.place_pose = PoseStamped(
        Header(0, rospy.Time(0), self.robot.base),
        Pose(Point(0.526025806, 0.4780144, -0.161326153),
             Quaternion(1, 0, 0, 0)))
    self.tl = tf.TransformListener()
    self.num_objects = 0
    # Would this work too? Both tf and tf2 have (c) 2008...
    # self.tf2 = tf2_ros.TransformListener()
    self.n_objects_sub = rospy.Subscriber("/num_objects", Int64, self.update_num_objects, queue_size=1)
    self.perception_pub = rospy.Publisher("/perception/enabled", Bool, queue_size=1)
    self.alignment_pub = rospy.Publisher("/alignment/doit", Bool, queue_size=1)
    self.br = tf.TransformBroadcaster()
    rospy.loginfo("PickAndPlaceNode3")
    self.int_marker_server = InteractiveMarkerServer('int_markers')
    # Dict to map imarker names and their updated poses
    self.int_markers = {}
    # Ideally this call would be in a Factory/Metaclass/Parent
    self.show_options()
    self.perceive = False
    # self.robot.home()
    # self.move_calib_position()