本文整理汇总了Python中pykitti.raw方法的典型用法代码示例。如果您正苦于以下问题：Python pykitti.raw方法的具体用法？Python pykitti.raw怎么用？Python pykitti.raw使用的例子？那么恭喜您，这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在模块pykitti的用法示例。
在下文中一共展示了pykitti.raw方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: make_kitti_video
# 需要导入模块: import pykitti [as 别名]
# 或者: from pykitti import raw [as 别名]
def make_kitti_video(basedir='/mnt/ssd2/od/KITTI/raw', date='2011_09_26',
                     drive='0035'):
    """Render a detection video for one KITTI raw drive.

    The dataset location and drive selection were previously hard-coded;
    they are now parameters whose defaults reproduce the old behavior,
    so existing no-argument callers are unaffected.

    Args:
        basedir: Root directory of the KITTI raw dataset.
        date: Recording date string, e.g. '2011_09_26'.
        drive: Drive number string, e.g. '0035'.
    """
    dataset = pykitti.raw(basedir, date, drive)
    video_name = "detection_{}_{}.avi".format(date, drive)
    # Save the video inside the synced drive's directory.
    save_path = os.path.join(basedir, date,
                             "{}_drive_{}_sync".format(date, drive),
                             video_name)
    run(dataset, save_path)
示例2: sequence
# 需要导入模块: import pykitti [as 别名]
# 或者: from pykitti import raw [as 别名]
def sequence(self, i):
    """Return KITTI sequence *i* with its poses as a numpy array.

    Indices 0..10 select odometry benchmark sequences (which ship with
    poses); larger indices select raw drives, whose cam0 poses are
    derived from the OXTS readings and the IMU->cam0 calibration.
    """
    if not (0 <= i < 11):
        # Raw drive: build cam0-frame poses from the OXTS world poses.
        drive = kittidrives.drives[i - 11]
        seq = pykitti.raw(self.rawdir, drive['date'], drive['drive'])
        T_imu_cam0 = util.invert_ht(seq.calib.T_cam0_imu)
        pose_list = [numpy.matmul(o.T_w_imu, T_imu_cam0) for o in seq.oxts]
        seq.poses = numpy.array(pose_list)
    else:
        # Odometry sequence: just convert the provided poses.
        seq = pykitti.odometry(self.ododir, '{:02d}'.format(i))
        seq.poses = numpy.array(seq.poses)
    return seq
示例3: run_vo_kitti
# 需要导入模块: import pykitti [as 别名]
# 或者: from pykitti import raw [as 别名]
def run_vo_kitti(basedir, date, drive, frames, outfile=None):
    """Run dense stereo visual odometry on a KITTI raw drive and compare
    the estimated trajectory against the OXTS-derived ground truth.

    Args:
        basedir: Root directory of the KITTI raw dataset.
        date: Recording date string, e.g. '2011_09_26'.
        drive: Drive number string, e.g. '0035'.
        frames: Frame selection forwarded to pykitti.raw.
        outfile: Optional path; when given, the metrics are written out
            via TrajectoryMetrics.savemat.

    Returns:
        A TrajectoryMetrics object over ground-truth vs. estimated poses.
    """
    # Load KITTI data
    dataset = pykitti.raw(basedir, date, drive, frames=frames, imformat='cv2')
    first_oxts = dataset.oxts[0]
    T_cam0_imu = SE3.from_matrix(dataset.calib.T_cam0_imu)
    T_cam0_imu.normalize()
    # Pose of the world in the first camera frame: anchors the estimate.
    T_0_w = T_cam0_imu.dot(
        SE3.from_matrix(first_oxts.T_w_imu).inv())
    T_0_w.normalize()
    # Create the camera
    test_im = np.array(next(dataset.cam0))
    # Intrinsics taken from the cam0 calibration matrix K_cam0.
    fu = dataset.calib.K_cam0[0, 0]
    fv = dataset.calib.K_cam0[1, 1]
    cu = dataset.calib.K_cam0[0, 2]
    cv = dataset.calib.K_cam0[1, 2]
    # b_gray: presumably the grayscale stereo baseline — confirm in pykitti docs.
    b = dataset.calib.b_gray
    h, w = test_im.shape
    camera = StereoCamera(cu, cv, fu, fv, b, w, h)
    # Ground truth: world-from-camera poses for every OXTS packet.
    T_w_c_gt = [SE3.from_matrix(o.T_w_imu).dot(T_cam0_imu.inv())
                for o in dataset.oxts]
    # Pipeline
    vo = DenseStereoPipeline(camera, first_pose=T_0_w)
    # Skip the highest resolution level
    vo.pyrlevel_sequence.pop()
    vo.pyr_cameras.pop()
    start = time.perf_counter()
    for c_idx, impair in enumerate(dataset.gray):
        vo.track(np.array(impair[0]), np.array(impair[1]))
        # vo.track(impair[0], impair[1], guess=T_w_c_gt[c_idx].inv())
        end = time.perf_counter()
        # Per-frame timing: 'start' is carried over from the previous frame.
        print('Image {}/{} | {:.3f} s'.format(c_idx, len(dataset), end - start))
        start = end
    # Compute errors
    T_w_c_est = [T.inv() for T in vo.T_c_w]
    tm = TrajectoryMetrics(T_w_c_gt, T_w_c_est)
    # Save to file
    if outfile is not None:
        print('Saving to {}'.format(outfile))
        tm.savemat(outfile)
    # Clean up: drop the pipeline (and its pyramids) before returning.
    del vo
    return tm
示例4: save_dynamic_tf
# 需要导入模块: import pykitti [as 别名]
# 或者: from pykitti import raw [as 别名]
def save_dynamic_tf(bag, kitti, kitti_type, initial_time):
    """Write per-frame /tf transforms for a KITTI dataset into a ROS bag.

    For "raw" data, publishes world -> base_link from each OXTS pose
    (kitti.oxts); for "odom" data, publishes world -> camera_left from
    each ground-truth camera pose (kitti.T_w_cam0).

    Args:
        bag: An open rosbag.Bag to write '/tf' messages into.
        kitti: A pykitti dataset object (raw or odometry).
        kitti_type: String containing "raw" or "odom" to select the mode.
        initial_time: Base epoch (seconds) added to odometry timestamps;
            unused for raw data, whose timestamps are absolute datetimes.
    """
    print("Exporting time dependent transformations")
    if kitti_type.find("raw") != -1:
        for timestamp, oxts in zip(kitti.timestamps, kitti.oxts):
            tf_oxts_msg = TFMessage()
            tf_oxts_transform = TransformStamped()
            # NOTE(review): '%s' in strftime is a platform-specific (Unix)
            # epoch-seconds directive — confirm this runs on Linux only.
            tf_oxts_transform.header.stamp = rospy.Time.from_sec(float(timestamp.strftime("%s.%f")))
            tf_oxts_transform.header.frame_id = 'world'
            tf_oxts_transform.child_frame_id = 'base_link'
            # 4x4 homogeneous pose of the IMU in the world frame.
            transform = (oxts.T_w_imu)
            t = transform[0:3, 3]
            q = tf.transformations.quaternion_from_matrix(transform)
            oxts_tf = Transform()
            oxts_tf.translation.x = t[0]
            oxts_tf.translation.y = t[1]
            oxts_tf.translation.z = t[2]
            oxts_tf.rotation.x = q[0]
            oxts_tf.rotation.y = q[1]
            oxts_tf.rotation.z = q[2]
            oxts_tf.rotation.w = q[3]
            tf_oxts_transform.transform = oxts_tf
            tf_oxts_msg.transforms.append(tf_oxts_transform)
            bag.write('/tf', tf_oxts_msg, tf_oxts_msg.transforms[0].header.stamp)
    elif kitti_type.find("odom") != -1:
        # Odometry timestamps are timedeltas; offset them by initial_time.
        timestamps = map(lambda x: initial_time + x.total_seconds(), kitti.timestamps)
        for timestamp, tf_matrix in zip(timestamps, kitti.T_w_cam0):
            tf_msg = TFMessage()
            tf_stamped = TransformStamped()
            tf_stamped.header.stamp = rospy.Time.from_sec(timestamp)
            tf_stamped.header.frame_id = 'world'
            tf_stamped.child_frame_id = 'camera_left'
            t = tf_matrix[0:3, 3]
            q = tf.transformations.quaternion_from_matrix(tf_matrix)
            transform = Transform()
            transform.translation.x = t[0]
            transform.translation.y = t[1]
            transform.translation.z = t[2]
            transform.rotation.x = q[0]
            transform.rotation.y = q[1]
            transform.rotation.z = q[2]
            transform.rotation.w = q[3]
            tf_stamped.transform = transform
            tf_msg.transforms.append(tf_stamped)
            bag.write('/tf', tf_msg, tf_msg.transforms[0].header.stamp)
示例5: save_camera_data
# 需要导入模块: import pykitti [as 别名]
# 或者: from pykitti import raw [as 别名]
def save_camera_data(bag, kitti_type, kitti, util, bridge, camera, camera_frame_id, topic, initial_time):
    """Export one camera's images (plus CameraInfo) from KITTI into a ROS bag.

    For "raw" data, images and timestamps are read from the drive's
    image_XX directory and full calibration is filled in from `util`;
    for "odom" data, only the projection matrix P is available.

    Args:
        bag: An open rosbag.Bag to write into.
        kitti_type: String containing "raw" or "odom" to select the mode.
        kitti: A pykitti dataset object (provides data_path/sequence_path).
        util: Calibration dict keyed like 'K_00', 'P_rect_00' (raw) or
            'P0' (odom) — presumably loaded from the KITTI calib files.
        bridge: cv_bridge.CvBridge used to convert OpenCV images to messages.
        camera: Camera index (0/1 are grayscale, 2/3 color in KITTI raw).
        camera_frame_id: ROS frame_id stamped on images and calibration.
        topic: Base topic name; '/image_raw' or '/image_rect' is appended.
        initial_time: Base epoch (seconds) added to odometry timestamps.
    """
    print("Exporting camera {}".format(camera))
    if kitti_type.find("raw") != -1:
        camera_pad = '{0:02d}'.format(camera)
        image_dir = os.path.join(kitti.data_path, 'image_{}'.format(camera_pad))
        image_path = os.path.join(image_dir, 'data')
        image_filenames = sorted(os.listdir(image_path))
        # timestamps.txt has one datetime per image; trailing digits are
        # trimmed to fit strptime's microsecond precision.
        with open(os.path.join(image_dir, 'timestamps.txt')) as f:
            image_datetimes = map(lambda x: datetime.strptime(x[:-4], '%Y-%m-%d %H:%M:%S.%f'), f.readlines())
        calib = CameraInfo()
        calib.header.frame_id = camera_frame_id
        calib.width, calib.height = tuple(util['S_rect_{}'.format(camera_pad)].tolist())
        calib.distortion_model = 'plumb_bob'
        calib.K = util['K_{}'.format(camera_pad)]
        calib.R = util['R_rect_{}'.format(camera_pad)]
        calib.D = util['D_{}'.format(camera_pad)]
        calib.P = util['P_rect_{}'.format(camera_pad)]
    elif kitti_type.find("odom") != -1:
        camera_pad = '{0:01d}'.format(camera)
        image_path = os.path.join(kitti.sequence_path, 'image_{}'.format(camera_pad))
        image_filenames = sorted(os.listdir(image_path))
        # Odometry timestamps are timedeltas; offset them by initial_time.
        image_datetimes = map(lambda x: initial_time + x.total_seconds(), kitti.timestamps)
        calib = CameraInfo()
        calib.header.frame_id = camera_frame_id
        calib.P = util['P{}'.format(camera_pad)]
    iterable = zip(image_datetimes, image_filenames)
    bar = progressbar.ProgressBar()
    for dt, filename in bar(iterable):
        image_filename = os.path.join(image_path, filename)
        cv_image = cv2.imread(image_filename)
        calib.height, calib.width = cv_image.shape[:2]
        # Cameras 0 and 1 are grayscale; collapse the BGR read back to mono.
        if camera in (0, 1):
            cv_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)
        encoding = "mono8" if camera in (0, 1) else "bgr8"
        image_message = bridge.cv2_to_imgmsg(cv_image, encoding=encoding)
        image_message.header.frame_id = camera_frame_id
        if kitti_type.find("raw") != -1:
            # NOTE(review): '%s' in strftime is a platform-specific (Unix)
            # epoch-seconds directive — confirm this runs on Linux only.
            image_message.header.stamp = rospy.Time.from_sec(float(datetime.strftime(dt, "%s.%f")))
            topic_ext = "/image_raw"
        elif kitti_type.find("odom") != -1:
            image_message.header.stamp = rospy.Time.from_sec(dt)
            topic_ext = "/image_rect"
        calib.header.stamp = image_message.header.stamp
        bag.write(topic + topic_ext, image_message, t = image_message.header.stamp)
        bag.write(topic + '/camera_info', calib, t = calib.header.stamp)