This article collects typical usage examples of open3d.PointCloud in Python. If you are wondering how open3d.PointCloud is used in practice, or what working examples look like, the curated code samples below may help; they also give a feel for the rest of the open3d module.
Fifteen code examples of open3d.PointCloud are shown below, sorted by popularity by default. Note that these snippets use the legacy Open3D API, in which PointCloud, Vector3dVector, and draw_geometries are exposed at the top level of the open3d module; in current releases the equivalents live under open3d.geometry, open3d.utility, and open3d.visualization.
Example 1: vis_pc
# Required module: import open3d [as alias]
# Or: from open3d import PointCloud [as alias]
def vis_pc(xyz, color_axis=-1, rgb=None):
    # TODO move to the other module and do import in the module
    import open3d
    pcd = open3d.PointCloud()
    pcd.points = open3d.Vector3dVector(xyz)

    if color_axis >= 0:
        if color_axis == 3:
            axis_vis = np.arange(0, xyz.shape[0], dtype=np.float32)
        else:
            axis_vis = xyz[:, color_axis]
        min_ = np.min(axis_vis)
        max_ = np.max(axis_vis)
        colors = cm.gist_rainbow((axis_vis - min_) / (max_ - min_))[:, 0:3]
        pcd.colors = open3d.Vector3dVector(colors)
    if rgb is not None:
        pcd.colors = open3d.Vector3dVector(rgb)

    open3d.draw_geometries([pcd])
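A minimal way to try vis_pc, assuming the function lives in a module that also imports numpy as np and matplotlib.cm as cm (both are used inside the function but not imported there):

import numpy as np
from matplotlib import cm  # vis_pc relies on cm.gist_rainbow

xyz = np.random.rand(1024, 3).astype(np.float32)  # toy point cloud
vis_pc(xyz, color_axis=2)                          # color the points by their z value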
Example 2: xyzi2pc
# Required module: import open3d [as alias]
# Or: from open3d import PointCloud [as alias]
def xyzi2pc(xyz, intensities=None):
    pc = o3.PointCloud()
    pc.points = o3.Vector3dVector(xyz)
    if intensities is not None:
        pc.colors = o3.Vector3dVector(intensity2color(intensities / 255.0))
    return pc
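This snippet aliases open3d as o3 and relies on an intensity2color helper that is not shown. A minimal stand-in, assuming the helper simply maps normalized intensities in [0, 1] to RGB with a matplotlib colormap (the viridis choice here is hypothetical):

import numpy as np
import open3d as o3
from matplotlib import cm

def intensity2color(intensities):
    # Hypothetical helper: map values in [0, 1] to RGB via a colormap.
    return cm.viridis(np.clip(intensities, 0.0, 1.0))[:, :3]

xyz = np.random.rand(500, 3)
intensities = np.random.randint(0, 256, size=500)
o3.draw_geometries([xyzi2pc(xyz, intensities)])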
Example 3: downsample_point_clouds
# Required module: import open3d [as alias]
# Or: from open3d import PointCloud [as alias]
def downsample_point_clouds():
    cfg = parse_arguments()
    vox_size = cfg.downsample_voxel_size
    synth_set = cfg.synth_set

    inp_dir = os.path.join(cfg.inp_dir, synth_set)
    files = glob.glob('{}/*.mat'.format(inp_dir))

    out_dir = cfg.out_dir
    out_synthset = os.path.join(out_dir, cfg.synth_set)
    mkdir_if_missing(out_synthset)

    for k, model_file in enumerate(files):
        print("{}/{}".format(k, len(files)))

        file_name = os.path.basename(model_file)
        sample_name, _ = os.path.splitext(file_name)

        obj = scipy.io.loadmat(model_file)

        out_filename = "{}/{}.mat".format(out_synthset, sample_name)
        if os.path.isfile(out_filename):
            print("already exists:", sample_name)
            continue

        Vgt = obj["points"]

        pcd = open3d.PointCloud()
        pcd.points = open3d.Vector3dVector(Vgt)
        downpcd = open3d.voxel_down_sample(pcd, voxel_size=vox_size)
        down_xyz = np.asarray(downpcd.points)
        scipy.io.savemat(out_filename, {"points": down_xyz})
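The downsampling step itself is independent of the .mat bookkeeping; a minimal sketch of the same legacy voxel_down_sample call on random points:

import numpy as np
import open3d

pts = np.random.rand(100000, 3)             # dense toy cloud
pcd = open3d.PointCloud()
pcd.points = open3d.Vector3dVector(pts)
downpcd = open3d.voxel_down_sample(pcd, voxel_size=0.05)
print(np.asarray(downpcd.points).shape)     # far fewer points after downsampling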
Example 4: draw_registration_result
# Required module: import open3d [as alias]
# Or: from open3d import PointCloud [as alias]
def draw_registration_result(src, trgt):
    source = open3d.PointCloud()
    source.points = open3d.Vector3dVector(src)
    target = open3d.PointCloud()
    target.points = open3d.Vector3dVector(trgt)
    source.paint_uniform_color([1, 0.706, 0])
    target.paint_uniform_color([0, 0.651, 0.929])
    open3d.draw_geometries([source, target])
Example 5: open3d_icp
# Required module: import open3d [as alias]
# Or: from open3d import PointCloud [as alias]
def open3d_icp(src, trgt, init_rotation=np.eye(3, 3)):
    source = open3d.PointCloud()
    source.points = open3d.Vector3dVector(src)
    target = open3d.PointCloud()
    target.points = open3d.Vector3dVector(trgt)

    init_rotation_4x4 = np.eye(4, 4)
    init_rotation_4x4[0:3, 0:3] = init_rotation

    threshold = 0.2
    reg_p2p = open3d.registration_icp(source, target, threshold, init_rotation_4x4,
                                      open3d.TransformationEstimationPointToPoint())
    return reg_p2p
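A small sketch of how the returned registration result might be used, assuming numpy is imported as np and the open3d_icp and draw_registration_result snippets above are both in scope:

import numpy as np

src = np.random.rand(200, 3)
# Target is the source shifted slightly, so ICP has an easy alignment to find.
trgt = src + np.array([0.05, 0.0, 0.0])

reg = open3d_icp(src, trgt)
print(reg.transformation)   # 4x4 transform estimated by point-to-point ICP
print(reg.fitness)          # fraction of source points matched within the threshold
draw_registration_result(src, trgt)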
Example 6: main
# Required module: import open3d [as alias]
# Or: from open3d import PointCloud [as alias]
def main():
    bot = Robot("locobot")
    vis = open3d.Visualizer()
    vis.create_window("3D Map")
    pcd = open3d.PointCloud()
    coord = open3d.create_mesh_coordinate_frame(1, [0, 0, 0])
    vis.add_geometry(pcd)
    vis.add_geometry(coord)

    # We update the viewer every few seconds
    update_time = 4
    while True:
        start_time = time.time()
        pts, colors = bot.base.base_state.vslam.get_3d_map()
        pcd.clear()
        # note that open3d.Vector3dVector is slow
        pcd.points = open3d.Vector3dVector(pts)
        pcd.colors = open3d.Vector3dVector(colors / 255.0)
        vis.update_geometry()
        vis.poll_events()
        vis.update_renderer()

        s_t = time.time()
        while time.time() - s_t < update_time:
            vis.poll_events()
            vis.update_renderer()
            time.sleep(0.1)

        end_time = time.time()
        process_time = end_time - start_time
        print("Updating every {0:.2f}s".format(process_time))
Example 7: main
# Required module: import open3d [as alias]
# Or: from open3d import PointCloud [as alias]
def main():
    parser = argparse.ArgumentParser(description="Argument Parser")
    parser.add_argument(
        "--floor_height", type=float, default=0.03, help="the z coordinate of the floor"
    )
    args = parser.parse_args()

    np.set_printoptions(precision=4, suppress=True)

    bot = Robot("locobot")
    bot.camera.set_pan_tilt(0, 0.7, wait=True)
    bot.arm.go_home()
    bot.arm.set_joint_positions([1.96, 0.52, -0.51, 1.67, 0.01], plan=False)

    vis = open3d.Visualizer()
    vis.create_window("3D Map")
    pcd = open3d.PointCloud()
    coord = open3d.create_mesh_coordinate_frame(1, [0, 0, 0])
    vis.add_geometry(pcd)
    vis.add_geometry(coord)

    while True:
        pts, colors = bot.camera.get_current_pcd(in_cam=False)
        pts, colors = filter_points(pts, colors, z_lowest=args.floor_height)
        pcd.clear()
        # note that open3d.Vector3dVector is slow
        pcd.points = open3d.Vector3dVector(pts)
        pcd.colors = open3d.Vector3dVector(colors / 255.0)
        vis.update_geometry()
        vis.poll_events()
        vis.update_renderer()
        time.sleep(0.1)
Example 8: main
# Required module: import open3d [as alias]
# Or: from open3d import PointCloud [as alias]
def main():
bot = Robot("kinect2")
vis = open3d.Visualizer()
vis.create_window("3D Map")
pcd = open3d.PointCloud()
coord = open3d.create_mesh_coordinate_frame(1, [0, 0, 0])
vis.add_geometry(pcd)
vis.add_geometry(coord)
# We update the viewer every a few seconds
update_time = 4
while True:
start_time = time.time()
pts, colors = bot.camera.get_current_pcd()
pcd.clear()
# note that open3d.Vector3dVector is slow
pcd.points = open3d.Vector3dVector(pts)
pcd.colors = open3d.Vector3dVector(colors / 255.0)
vis.update_geometry()
vis.poll_events()
vis.update_renderer()
s_t = time.time()
while time.time() - s_t < update_time:
vis.poll_events()
vis.update_renderer()
time.sleep(0.1)
end_time = time.time()
process_time = end_time - start_time
print("Updating every {0:.2f}s".format(process_time))
Example 9: load_snapshot
# Required module: import open3d [as alias]
# Or: from open3d import PointCloud [as alias]
def load_snapshot(sessionname):
    cloud = o3.PointCloud()
    trajectory = o3.LineSet()
    with np.load(os.path.join(resultdir, sessionname, snapshotfile)) as data:
        cloud.points = o3.Vector3dVector(data['points'])
        cloud.colors = o3.Vector3dVector(
            util.intensity2color(data['intensities'] / 255.0))
        trajectory.points = o3.Vector3dVector(data['trajectory'])
        lines = np.reshape(range(data['trajectory'].shape[0] - 1), [-1, 1]) \
            + [0, 1]
        trajectory.lines = o3.Vector2iVector(lines)
        trajectory.colors = o3.Vector3dVector(
            np.tile([0.0, 0.5, 0.0], [lines.shape[0], 1]))
    return cloud, trajectory
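The trajectory is rendered as a LineSet whose lines connect consecutive poses. A self-contained sketch of the same construction with toy data, using the legacy o3 alias from the snippet:

import numpy as np
import open3d as o3

traj = np.array([[0.0, 0.0, 0.0],
                 [1.0, 0.0, 0.0],
                 [1.0, 1.0, 0.0],
                 [1.0, 1.0, 1.0]])

lineset = o3.LineSet()
lineset.points = o3.Vector3dVector(traj)
# Connect point i to point i + 1: [[0, 1], [1, 2], [2, 3]].
lines = np.reshape(range(traj.shape[0] - 1), [-1, 1]) + [0, 1]
lineset.lines = o3.Vector2iVector(lines)
lineset.colors = o3.Vector3dVector(np.tile([0.0, 0.5, 0.0], [lines.shape[0], 1]))

o3.draw_geometries([lineset])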
Example 10: view_local_maps
# Required module: import open3d [as alias]
# Or: from open3d import PointCloud [as alias]
def view_local_maps(sessionname):
    sessiondir = os.path.join('nclt', sessionname)
    session = pynclt.session(sessionname)
    maps = np.load(os.path.join(sessiondir, get_localmapfile()))['maps']
    for i, map in enumerate(maps):
        print('Map #{}'.format(i))
        mapboundsvis = util.create_wire_box(mapextent, [0.0, 0.0, 1.0])
        mapboundsvis.transform(map['T_w_m'])

        polevis = []
        for poleparams in map['poleparams']:
            x, y, zs, ze, a = poleparams[:5]
            pole = util.create_wire_box(
                [a, a, ze - zs], color=[1.0, 1.0, 0.0])
            T_m_p = np.identity(4)
            T_m_p[:3, 3] = [x - 0.5 * a, y - 0.5 * a, zs]
            pole.transform(map['T_w_m'].dot(T_m_p))
            polevis.append(pole)

        accucloud = o3.PointCloud()
        for j in range(map['istart'], map['iend']):
            points, intensities = session.get_velo(j)
            cloud = o3.PointCloud()
            cloud.points = o3.Vector3dVector(points)
            cloud.colors = o3.Vector3dVector(
                util.intensity2color(intensities / 255.0))
            cloud.transform(session.T_w_r_odo_velo[j])
            accucloud.points.extend(cloud.points)
            accucloud.colors.extend(cloud.colors)

        o3.draw_geometries([accucloud, mapboundsvis] + polevis)
Example 11: view_local_maps
# Required module: import open3d [as alias]
# Or: from open3d import PointCloud [as alias]
def view_local_maps(seq):
    sequence = dataset.sequence(seq)
    seqdir = os.path.join('kitti', '{:03d}'.format(seq))
    with np.load(os.path.join(seqdir, localmapfile)) as data:
        for i, map in enumerate(data['maps']):
            T_w_m = sequence.poses[map['imid']].dot(T_cam0_mc).dot(T_mc_m)
            mapboundsvis = util.create_wire_box(mapextent, [0.0, 0.0, 1.0])
            mapboundsvis.transform(T_w_m)

            polemap = []
            for poleparams in map['poleparams']:
                x, y, zs, ze, a, _ = poleparams
                pole = util.create_wire_box(
                    [a, a, ze - zs], color=[1.0, 1.0, 0.0])
                T_m_p = np.identity(4)
                T_m_p[:3, 3] = [x - 0.5 * a, y - 0.5 * a, zs]
                pole.transform(T_w_m.dot(T_m_p))
                polemap.append(pole)

            accucloud = o3.PointCloud()
            for j in range(map['istart'], map['iend']):
                velo = sequence.get_velo(j)
                cloud = o3.PointCloud()
                cloud.points = o3.Vector3dVector(velo[:, :3])
                cloud.colors = o3.Vector3dVector(
                    util.intensity2color(velo[:, 3]))
                cloud.transform(
                    sequence.poses[j].dot(sequence.calib.T_cam0_velo))
                accucloud.points.extend(cloud.points)
                accucloud.colors.extend(cloud.colors)

            o3.draw_geometries([accucloud, mapboundsvis] + polemap)
Example 12: initializePointCoud
# Required module: import open3d [as alias]
# Or: from open3d import PointCloud [as alias]
def initializePointCoud(pts):
    pcd = o3d.PointCloud()
    pcd.points = o3d.Vector3dVector(pts)
    return pcd
Example 13: initializePointCoud
# Required module: import open3d [as alias]
# Or: from open3d import PointCloud [as alias]
def initializePointCoud(pts):
    pcd = o3.PointCloud()
    pcd.points = o3.Vector3dVector(pts)
    return pcd
Example 14: from_carla_point_cloud
# Required module: import open3d [as alias]
# Or: from open3d import PointCloud [as alias]
def from_carla_point_cloud(cls, carla_pc, lidar_setup):
"""Creates a pylot point cloud from a carla point cloud.
Returns:
:py:class:`.PointCloud`: A point cloud.
"""
points = np.frombuffer(carla_pc.raw_data, dtype=np.dtype('f4'))
points = copy.deepcopy(points)
points = np.reshape(points, (int(points.shape[0] / 3), 3))
return cls(points, lidar_setup)
Example 15: get_pixel_location
# Required module: import open3d [as alias]
# Or: from open3d import PointCloud [as alias]
def get_pixel_location(self, pixel, camera_setup):
""" Gets the 3D world location from pixel coordinates.
Args:
pixel (:py:class:`~pylot.utils.Vector2D`): Pixel coordinates.
camera_setup (:py:class:`~pylot.drivers.sensor_setup.CameraSetup`):
The setup of the camera.
Returns:
:py:class:`~pylot.utils.Location`: The 3D world location, or None
if all the point cloud points are behind.
"""
# Select only points that are in front.
fwd_points = self.points[np.where(self.points[:, 2] > 0.0)]
if len(fwd_points) == 0:
return None
intrinsic_mat = camera_setup.get_intrinsic_matrix()
# Project our 2D pixel location into 3D space, onto the z=1 plane.
p3d = np.dot(inv(intrinsic_mat), np.array([[pixel.x], [pixel.y],
[1.0]]))
if self._lidar_setup.lidar_type == 'sensor.lidar.ray_cast':
location = PointCloud.get_closest_point_in_point_cloud(
fwd_points, Vector2D(p3d[0], p3d[1]), normalized=True)
# Use the depth from the retrieved location.
p3d *= np.array([location.z])
p3d = p3d.transpose()
# Convert from camera to unreal coordinates if the lidar type is
# sensor.lidar.ray_cast
to_world_transform = camera_setup.get_unreal_transform()
camera_point_cloud = to_world_transform.transform_points(p3d)[0]
pixel_location = Location(camera_point_cloud[0],
camera_point_cloud[1],
camera_point_cloud[2])
elif self._lidar_setup.lidar_type == 'velodyne':
location = PointCloud.get_closest_point_in_point_cloud(
fwd_points, Vector2D(p3d[0], p3d[1]), normalized=False)
# Use the depth from the retrieved location.
p3d[2] = location.z
p3d = p3d.transpose()
pixel_location = Location(p3d[0, 0], p3d[0, 1], p3d[0, 2])
return pixel_location
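The geometric core of this method is standard pinhole back-projection: the pixel is lifted onto the z=1 plane with the inverse intrinsic matrix and then scaled by the depth of the closest lidar point. A stand-alone numpy sketch of that math, with a hypothetical intrinsic matrix and depth value:

import numpy as np
from numpy.linalg import inv

# Hypothetical 640x480 camera with a 90-degree horizontal field of view.
fx = fy = 320.0
cx, cy = 320.0, 240.0
K = np.array([[fx, 0.0, cx],
              [0.0, fy, cy],
              [0.0, 0.0, 1.0]])

pixel = np.array([[400.0], [300.0], [1.0]])  # homogeneous pixel coordinates
ray = inv(K).dot(pixel)                      # point on the z = 1 plane
depth = 12.5                                 # depth of the matched lidar point (made up)
point_cam = ray * depth                      # 3D point in camera coordinates
print(point_cam.ravel())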