

Python open3d.TransformationEstimationPointToPoint Method Code Examples

This article collects typical usage examples of the open3d.TransformationEstimationPointToPoint method in Python. If you are unsure what open3d.TransformationEstimationPointToPoint does or how to call it, the curated examples below should help. You can also explore further usage examples of the open3d package that this method belongs to.


The following presents 9 code examples of open3d.TransformationEstimationPointToPoint, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
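
Before diving into the collected examples, here is a minimal sketch of the typical call pattern. It is illustrative only: the point data is synthetic, and it assumes the legacy flat-namespace Open3D API (roughly versions 0.7-0.9) that the examples below also use; newer Open3D releases moved these names under open3d.geometry, open3d.utility and open3d.pipelines.registration.

import numpy as np
import open3d

# Two synthetic point clouds offset by a small translation (illustrative data)
src_pts = np.random.rand(100, 3)
tgt_pts = src_pts + np.array([0.05, 0.0, 0.0])

source = open3d.PointCloud()
source.points = open3d.Vector3dVector(src_pts)
target = open3d.PointCloud()
target.points = open3d.Vector3dVector(tgt_pts)

threshold = 0.2        # maximum correspondence distance
init = np.eye(4)       # initial alignment guess (identity)
reg = open3d.registration_icp(
    source, target, threshold, init,
    open3d.TransformationEstimationPointToPoint())  # point-to-point objective
print(reg.transformation)  # estimated 4x4 rigid transformation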

Example 1: icp_o3_gicp_fast

# Required module: import open3d [as alias]
# Or: from open3d import TransformationEstimationPointToPoint [as alias]
def icp_o3_gicp_fast(file_idx, cfg, refine=None, refine_radius=0.05, precomputed_results=None):
    pc1, pc2, pc1_centroid = load_pountclouds(file_idx, cfg)
    voxel_size = 0.05
    distance_threshold = voxel_size * 0.5
    start = time.time()
    if precomputed_results is None:
        source_down, target_down, source_fpfh, target_fpfh = ICP._icp_global_prepare_dataset(pc1, pc2, voxel_size)
        reg_res = o3.registration_fast_based_on_feature_matching(
            source_down, target_down, source_fpfh, target_fpfh,
            o3.FastGlobalRegistrationOption(
                with_constraint=cfg.evaluation.special.icp.with_constraint,
                maximum_correspondence_distance=distance_threshold))
        transformation = reg_res.transformation
    else:
        precomp_pred_translation, precomp_pred_angle, precomp_pred_center = precomputed_results
        transformation = get_mat_angle(precomp_pred_translation, precomp_pred_angle, precomp_pred_center)

    if refine is None:
        time_elapsed = time.time() - start
        return transformation, pc1_centroid, time_elapsed
    else:
        if refine == 'p2p':
            reg_p2p = o3.registration_icp(
                pc1, pc2, refine_radius, transformation,
                o3.TransformationEstimationPointToPoint(
                    with_constraint=cfg.evaluation.special.icp.with_constraint,
                    with_scaling=False))
            time_elapsed = time.time() - start
            return reg_p2p.transformation, pc1_centroid, time_elapsed
        else:
            assert False 
Developer: grossjohannes | Project: AlignNet-3D | Lines of code: 25 | Source file: icp.py

Example 2: open3d_icp

# Required module: import open3d [as alias]
# Or: from open3d import TransformationEstimationPointToPoint [as alias]
def open3d_icp(src, trgt, init_rotation=np.eye(3, 3)):
    source = open3d.PointCloud()
    source.points = open3d.Vector3dVector(src)

    target = open3d.PointCloud()
    target.points = open3d.Vector3dVector(trgt)

    init_rotation_4x4 = np.eye(4, 4)
    init_rotation_4x4[0:3, 0:3] = init_rotation

    threshold = 0.2
    reg_p2p = open3d.registration_icp(source, target, threshold, init_rotation_4x4,
                                    open3d.TransformationEstimationPointToPoint())

    return reg_p2p 
Developer: eldar | Project: differentiable-point-clouds | Lines of code: 17 | Source file: compute_alignment.py
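
A hypothetical call of the helper above, with made-up NumPy arrays standing in for src and trgt:

import numpy as np

src = np.random.rand(500, 3)          # source points, shape (N, 3)
trgt = src + 0.02                     # roughly aligned target points
reg = open3d_icp(src, trgt)           # identity initial rotation by default
print(reg.transformation)             # 4x4 transform estimated by point-to-point ICP
print(reg.fitness, reg.inlier_rmse)   # registration quality metrics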

Example 3: _do_icp_p2p

# Required module: import open3d [as alias]
# Or: from open3d import TransformationEstimationPointToPoint [as alias]
def _do_icp_p2p(self, pc1, pc2, radius, init, constrain):
        return o3.registration_icp(pc1, pc2, radius, init, o3.TransformationEstimationPointToPoint(with_constraint=constrain, with_scaling=False)) 
Developer: grossjohannes | Project: AlignNet-3D | Lines of code: 4 | Source file: pointcloud.py

Example 4: _do_icp_global

# Required module: import open3d [as alias]
# Or: from open3d import TransformationEstimationPointToPoint [as alias]
def _do_icp_global(self, pc1, pc2, constrain):
        voxel_size = 0.05
        source_down, target_down, source_fpfh, target_fpfh = ICP._icp_global_prepare_dataset(pc1, pc2, voxel_size)
        distance_threshold = voxel_size * 1.5
        result = o3.registration_ransac_based_on_feature_matching(
            source_down, target_down, source_fpfh, target_fpfh, distance_threshold,
            o3.TransformationEstimationPointToPoint(with_constraint=constrain, with_scaling=False),
            4,
            [o3.CorrespondenceCheckerBasedOnEdgeLength(0.9), o3.CorrespondenceCheckerBasedOnDistance(distance_threshold)],
            o3.RANSACConvergenceCriteria(4000000, 500))
        return result 
Developer: grossjohannes | Project: AlignNet-3D | Lines of code: 8 | Source file: pointcloud.py

Example 5: icp_p2point

# Required module: import open3d [as alias]
# Or: from open3d import TransformationEstimationPointToPoint [as alias]
def icp_p2point(file_idx, cfg, radius=0.2, its=30, init=None, with_constraint=None):
    with_constraint = with_constraint if with_constraint is not None else cfg.evaluation.special.icp.with_constraint
    pc1, pc2, pc1_centroid = load_pountclouds(file_idx, cfg)
    if init is None:
        #  init = get_median_init(pc1, pc2)
        init = get_centroid_init(pc1, pc2)
    start = time.time()
    reg_p2p = o3.registration_icp(
        pc1, pc2, radius, init,
        o3.TransformationEstimationPointToPoint(with_constraint=with_constraint, with_scaling=False),
        o3.registration.ICPConvergenceCriteria(max_iteration=its))  # Default: 30
    time_elapsed = time.time() - start
    return reg_p2p.transformation, pc1_centroid, time_elapsed 
Developer: grossjohannes | Project: AlignNet-3D | Lines of code: 12 | Source file: icp.py

Example 6: icp_o3_gicp

# Required module: import open3d [as alias]
# Or: from open3d import TransformationEstimationPointToPoint [as alias]
def icp_o3_gicp(file_idx, cfg, refine=None, refine_radius=0.05, precomputed_results=None):
    pc1, pc2, pc1_centroid = load_pountclouds(file_idx, cfg)
    voxel_size = 0.05
    start = time.time()
    if precomputed_results is None:
        distance_threshold = voxel_size * 1.5
        source_down, target_down, source_fpfh, target_fpfh = ICP._icp_global_prepare_dataset(pc1, pc2, voxel_size)
        reg_res = o3.registration_ransac_based_on_feature_matching(
            source_down,
            target_down,
            source_fpfh,
            target_fpfh,
            distance_threshold,
            o3.TransformationEstimationPointToPoint(with_constraint=cfg.evaluation.special.icp.with_constraint, with_scaling=False),
            4,  # ransac_n: number of correspondences sampled per RANSAC iteration
            [o3.CorrespondenceCheckerBasedOnEdgeLength(0.9), o3.CorrespondenceCheckerBasedOnDistance(distance_threshold)],
            o3.RANSACConvergenceCriteria(4000000, 500))
        transformation = reg_res.transformation
    else:
        precomp_pred_translation, precomp_pred_angle, precomp_pred_center = precomputed_results
        transformation = get_mat_angle(precomp_pred_translation, precomp_pred_angle, precomp_pred_center)

    if refine is None:
        time_elapsed = time.time() - start
        return transformation, pc1_centroid, time_elapsed
    else:
        if refine == 'p2p':
            reg_p2p = o3.registration_icp(
                pc1, pc2, refine_radius, transformation,
                o3.TransformationEstimationPointToPoint(
                    with_constraint=cfg.evaluation.special.icp.with_constraint,
                    with_scaling=False))
            #  if file_idx == 8019:
            #  print('->', reg_p2p.transformation)
            time_elapsed = time.time() - start
            return reg_p2p.transformation, pc1_centroid, time_elapsed
        else:
            assert False 
Developer: grossjohannes | Project: AlignNet-3D | Lines of code: 36 | Source file: icp.py

Example 7: refine_registration

# Required module: import open3d [as alias]
# Or: from open3d import TransformationEstimationPointToPoint [as alias]
def refine_registration(source, target, corrs):
    start = time.time()

    p2p = o3d.TransformationEstimationPointToPoint()
    result = p2p.compute_transformation(source, target, o3d.Vector2iVector(corrs))

    elapsed_time = time.time() - start

    return result, elapsed_time 
Developer: goncalo120 | Project: 3DRegNet | Lines of code: 13 | Source file: setupPly.py

Example 8: refine_registration

# Required module: import open3d [as alias]
# Or: from open3d import TransformationEstimationPointToPoint [as alias]
def refine_registration(source, target, corrs):
    start = time.time()

    p2p = o3.TransformationEstimationPointToPoint()
    result = p2p.compute_transformation(source, target, o3.Vector2iVector(corrs))

    elapsed_time = time.time() - start

    return result, elapsed_time 
Developer: goncalo120 | Project: 3DRegNet | Lines of code: 13 | Source file: registration.py
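
Examples 7 and 8 are identical apart from the import alias (o3d vs. o3): given known point correspondences, TransformationEstimationPointToPoint.compute_transformation solves for the rigid transform in closed form (SVD), with no ICP iterations. Below is a sketch of how such a call might look end to end; the point data and index pairs are fabricated for illustration:

import numpy as np
import open3d as o3

src = o3.PointCloud()
src.points = o3.Vector3dVector(np.random.rand(50, 3))
tgt = o3.PointCloud()
tgt.points = o3.Vector3dVector(np.asarray(src.points) + 0.1)

# Assume the i-th source point corresponds to the i-th target point
corrs = np.stack([np.arange(50), np.arange(50)], axis=1).astype(np.int32)
transformation, elapsed = refine_registration(src, tgt, corrs)
print(transformation)   # closed-form 4x4 point-to-point estimate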

Example 9: execute_global_registration

# Required module: import open3d [as alias]
# Or: from open3d import TransformationEstimationPointToPoint [as alias]
def execute_global_registration(src_keypts, tgt_keypts, src_desc, tgt_desc, distance_threshold):
    result = open3d.registration_ransac_based_on_feature_matching(
        src_keypts, tgt_keypts, src_desc, tgt_desc,
        distance_threshold,
        open3d.TransformationEstimationPointToPoint(False), 4,
        [open3d.CorrespondenceCheckerBasedOnEdgeLength(0.9),
         open3d.CorrespondenceCheckerBasedOnDistance(distance_threshold)],
        open3d.RANSACConvergenceCriteria(4000000, 500))
    return result 
Developer: XuyangBai | Project: D3Feat | Lines of code: 11 | Source file: demo_registration.py
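
execute_global_registration expects downsampled keypoint clouds plus per-point feature descriptors (for example FPFH). The following sketch shows how the inputs might be prepared with the same legacy flat-namespace Open3D API; src_pcd and tgt_pcd are assumed to be pre-loaded open3d.PointCloud objects, and the preprocess helper is illustrative, not part of the original project:

import open3d

def preprocess(pcd, voxel_size):
    # Downsample, estimate normals, then compute FPFH descriptors (legacy flat-namespace API)
    down = open3d.voxel_down_sample(pcd, voxel_size)
    open3d.estimate_normals(down, open3d.KDTreeSearchParamHybrid(radius=voxel_size * 2, max_nn=30))
    fpfh = open3d.compute_fpfh_feature(down, open3d.KDTreeSearchParamHybrid(radius=voxel_size * 5, max_nn=100))
    return down, fpfh

# src_pcd, tgt_pcd: assumed pre-loaded open3d.PointCloud objects (loading not shown)
voxel_size = 0.05
src_down, src_fpfh = preprocess(src_pcd, voxel_size)
tgt_down, tgt_fpfh = preprocess(tgt_pcd, voxel_size)
result = execute_global_registration(src_down, tgt_down, src_fpfh, tgt_fpfh, voxel_size * 1.5)
print(result.transformation)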


Note: The open3d.TransformationEstimationPointToPoint method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and the copyright of the source code remains with the original authors. Please follow the corresponding project's license when distributing or using the code; do not reproduce this article without permission.