

Python tf_grouping.knn_point Method Code Examples

This article collects typical usage examples of the tf_grouping.knn_point method in Python. If you are wondering what tf_grouping.knn_point does or how to use it, the curated code examples below should help. You can also explore further usage examples from the tf_grouping module it belongs to.


The following shows 14 code examples of tf_grouping.knn_point, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
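Before reading the examples, it helps to know the call pattern they all share. The sketch below is inferred from the usages in this article; since tf_grouping is a compiled extension from point-cloud codebases rather than a documented public library, treat the exact shapes and return values as assumptions.

import tensorflow as tf
from tf_grouping import knn_point, group_point

# Assumed call pattern, inferred from the examples in this article:
#   dist, idx = knn_point(k, points, queries)
#     points:  (batch_size, ndataset, 3) candidate points to search
#     queries: (batch_size, npoint, 3)   query points
#     dist:    (batch_size, npoint, k)   distances to the k nearest neighbors
#     idx:     (batch_size, npoint, k)   int32 indices into `points`

xyz = tf.random_uniform([8, 1024, 3])   # hypothetical point cloud (TF 1.x API)
new_xyz = xyz[:, :256, :]               # hypothetical query points

_, idx = knn_point(16, xyz, new_xyz)    # 16 nearest neighbors per query point
grouped_xyz = group_point(xyz, idx)     # (8, 256, 16, 3) local neighborhoods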

Example 1: sample_and_group

# Required import: import tf_grouping [as alias]
# Or: from tf_grouping import knn_point [as alias]
def sample_and_group(npoint, radius, nsample, xyz, points, knn=False, use_xyz=True):
    '''
    Input:
        npoint: int32
        radius: float32
        nsample: int32
        xyz: (batch_size, ndataset, 3) TF tensor
        points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
        knn: bool, if True use kNN instead of radius search
        use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
    Output:
        new_xyz: (batch_size, npoint, 3) TF tensor
        new_points: (batch_size, npoint, nsample, 3+channel) TF tensor
        idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points
        grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs
            (subtracted by seed point XYZ) in local regions
    '''

    new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz)) # (batch_size, npoint, 3)
    if knn:
        _,idx = knn_point(nsample, xyz, new_xyz)
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
    grouped_xyz = group_point(xyz, idx) # (batch_size, npoint, nsample, 3)
    grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2), [1,1,nsample,1]) # translation normalization
    if points is not None:
        grouped_points = group_point(points, idx) # (batch_size, npoint, nsample, channel)
        if use_xyz:
            new_points = tf.concat([grouped_xyz, grouped_points], axis=-1) # (batch_size, npoint, nsample, 3+channel)
        else:
            new_points = grouped_points
    else:
        new_points = grouped_xyz

    return new_xyz, new_points, idx, grouped_xyz 
Developer ID: mhsung, Project: deep-functional-dictionaries, Lines of code: 37, Source file: pointnet_util.py
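For context, here is a hypothetical invocation of this helper; the shapes follow the docstring above, and the placeholder setup is an illustrative assumption (TF 1.x style, matching the examples).

xyz_in = tf.placeholder(tf.float32, [16, 2048, 3])      # input coordinates
feats_in = tf.placeholder(tf.float32, [16, 2048, 64])   # per-point features

# Ball query: 512 centroids, up to 32 neighbors within radius 0.2.
new_xyz, new_points, idx, grouped_xyz = sample_and_group(
    npoint=512, radius=0.2, nsample=32, xyz=xyz_in, points=feats_in)
# new_points: (16, 512, 32, 3+64), ready for a shared point-wise MLP

# kNN variant: the radius argument is ignored when knn=True.
_, knn_points, _, _ = sample_and_group(
    npoint=512, radius=None, nsample=32, xyz=xyz_in, points=feats_in, knn=True)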

Example 2: sample_and_group

# Required import: import tf_grouping [as alias]
# Or: from tf_grouping import knn_point [as alias]
def sample_and_group(npoint, radius, nsample, xyz, points, knn=False, use_xyz=True):
    '''
    Input:
        npoint: int32
        radius: float32
        nsample: int32
        xyz: (batch_size, ndataset, 3) TF tensor
        points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
        knn: bool, if True use kNN instead of radius search
        use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
    Output:
        new_xyz: (batch_size, npoint, 3) TF tensor
        new_points: (batch_size, npoint, nsample, 3+channel) TF tensor
        idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points
        grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs
            (subtracted by seed point XYZ) in local regions
    '''
    sample_idx = farthest_point_sample(npoint, xyz)
    new_xyz = gather_point(xyz, sample_idx) # (batch_size, npoint, 3)
    if knn:
        _,idx = knn_point(nsample, xyz, new_xyz)
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
    grouped_xyz = group_point(xyz, idx) # (batch_size, npoint, nsample, 3)
    grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2), [1,1,nsample,1]) # translation normalization
    if points is not None:
        grouped_points = group_point(points, idx) # (batch_size, npoint, nsample, channel)
        if use_xyz:
            new_points = tf.concat([grouped_xyz, grouped_points], axis=-1) # (batch_size, npoint, nsample, 3+channel)
        else:
            new_points = grouped_points
    else:
        new_points = grouped_xyz

    return new_xyz, new_points, idx, sample_idx, grouped_xyz 
Developer ID: xingyul, Project: meteornet, Lines of code: 37, Source file: net_utils.py

Example 3: sample_and_group

# Required import: import tf_grouping [as alias]
# Or: from tf_grouping import knn_point [as alias]
def sample_and_group(npoint, radius, nsample, xyz, points, knn=False, use_xyz=True):
    '''
    Input:
        npoint:         int32
        radius:         float32
        nsample:        int32
        xyz:            (batch_size, ndataset, 3) TF tensor
        points:         (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
        knn:            bool, if True use kNN instead of radius search
        use_xyz:        bool, if True concat XYZ with local point features, otherwise just use point features
    Output:
        new_xyz:        (batch_size, npoint, 3) TF tensor
        new_points:     (batch_size, npoint, nsample, 3+channel) TF tensor
        idx:            (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points
        grouped_xyz:    (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs (subtracted by seed point XYZ) in local regions
    '''

    new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz))         # (batch_size, npoint, 3)
    if knn:
        _,idx = knn_point(nsample, xyz, new_xyz)
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)

    grouped_xyz = group_point(xyz, idx)                                     # (batch_size, npoint, nsample, 3)
    grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2), [1,1,nsample,1])     # translation normalization

    if points is not None:
        grouped_points = group_point(points, idx)                           # (batch_size, npoint, nsample, channel)
        if use_xyz:
            new_points = tf.concat([grouped_xyz, grouped_points], axis=-1)  # (batch_size, npoint, nsample, 3+channel)
        else:
            new_points = grouped_points
    else:
        new_points = grouped_xyz

    return new_xyz, new_points, idx, grouped_xyz 
Developer ID: hehefan, Project: PointRNN, Lines of code: 38, Source file: pointnet2.py

Example 4: pc_sampling

# Required import: import tf_grouping [as alias]
# Or: from tf_grouping import knn_point [as alias]
def pc_sampling(xyz,
                feat,
                nsample,
                num_point,
                scope='sampling'):
  """ Fully connected layer with non-linear operation.
  
  Args:
    xyz: 3-D tensor B x N x 3
    nsample: k
    num_point: N2
    feat: 3-D tensor B x N x C
  
  Returns:
    feat_sample: 3-D tensor B x N2 x C
  """
  with tf.variable_scope(scope) as sc:
    xyz_new = gather_point(xyz, farthest_point_sample(num_point, xyz))
    _, idx_pooling = knn_point(nsample, xyz, xyz_new)
    
    grouped_points = group_point(feat, idx_pooling)
    feat_sample = tf.nn.max_pool(grouped_points, [1,1,nsample,1], [1,1,1,1],
                                 padding='VALID', data_format='NHWC', name="MAX_POOLING")
    feat_sample = tf.squeeze(feat_sample, axis=[2])

    return feat_sample, xyz_new 
Developer ID: xyf513, Project: SpiderCNN, Lines of code: 28, Source file: tf_util.py

Example 5: sample_and_group

# Required import: import tf_grouping [as alias]
# Or: from tf_grouping import knn_point [as alias]
def sample_and_group(npoint, radius, nsample, xyz, points, tnet_spec=None, knn=False, use_xyz=True):
    '''
    Input:
        npoint: int32
        radius: float32
        nsample: int32
        xyz: (batch_size, ndataset, 3) TF tensor
        points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
        tnet_spec: dict (keys: mlp, mlp2, is_training, bn_decay), if None do not apply tnet
        knn: bool, if True use kNN instead of radius search
        use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
    Output:
        new_xyz: (batch_size, npoint, 3) TF tensor
        new_points: (batch_size, npoint, nsample, 3+channel) TF tensor
        idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points
        grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs
            (subtracted by seed point XYZ) in local regions
    '''

    new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz)) # (batch_size, npoint, 3)
    if knn:
        _,idx = knn_point(nsample, xyz, new_xyz)
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
    grouped_xyz = group_point(xyz, idx) # (batch_size, npoint, nsample, 3)
    grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2), [1,1,nsample,1]) # translation normalization
    if tnet_spec is not None:
        grouped_xyz = tnet(grouped_xyz, tnet_spec)
    if points is not None:
        grouped_points = group_point(points, idx) # (batch_size, npoint, nsample, channel)
        if use_xyz:
            new_points = tf.concat([grouped_xyz, grouped_points], axis=-1) # (batch_size, npoint, nsample, 3+channel)
        else:
            new_points = grouped_points
    else:
        new_points = grouped_xyz

    return new_xyz, new_points, idx, grouped_xyz 
Developer ID: ericyi, Project: GSPN, Lines of code: 40, Source file: pointnet_util.py

Example 6: sample_and_group

# Required import: import tf_grouping [as alias]
# Or: from tf_grouping import knn_point [as alias]
def sample_and_group(npoint, radius, nsample, xyz, points, knn=False, use_xyz=True):
    '''
    Input:
        npoint: int32
        radius: float32
        nsample: int32
        xyz: (batch_size, ndataset, 3) TF tensor
        points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
        knn: bool, if True use kNN instead of radius search
        use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
    Output:
        new_xyz: (batch_size, npoint, 3) TF tensor
        new_points: (batch_size, npoint, nsample, 3+channel) TF tensor
        idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points
        grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs
            (subtracted by seed point XYZ) in local regions
    '''

    new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz))  # (batch_size, npoint, 3)
    if knn:
        _, idx = knn_point(nsample, xyz, new_xyz)
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
    grouped_xyz = group_point(xyz, idx)  # (batch_size, npoint, nsample, 3)
    grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2), [1, 1, nsample, 1])  # translation normalization
    if points is not None:
        grouped_points = group_point(points, idx)  # (batch_size, npoint, nsample, channel)
        if use_xyz:
            new_points = tf.concat([grouped_xyz, grouped_points], axis=-1)  # (batch_size, npoint, nsample, 3+channel)
        else:
            new_points = grouped_points
    else:
        new_points = grouped_xyz

    return new_xyz, new_points, idx, grouped_xyz 
Developer ID: dlinzhao, Project: JSNet, Lines of code: 37, Source file: pointnet_util.py

Example 7: flow_embedding_module

# Required import: import tf_grouping [as alias]
# Or: from tf_grouping import knn_point [as alias]
def flow_embedding_module(xyz1, xyz2, feat1, feat2, radius, nsample, mlp, is_training, bn_decay, scope, bn=True, pooling='max', knn=True, corr_func='elementwise_product'):
    """
    Input:
        xyz1: (batch_size, npoint, 3)
        xyz2: (batch_size, npoint, 3)
        feat1: (batch_size, npoint, channel)
        feat2: (batch_size, npoint, channel)
    Output:
        xyz1: (batch_size, npoint, 3)
        feat1_new: (batch_size, npoint, mlp[-1])
    """
    if knn:
        _, idx = knn_point(nsample, xyz2, xyz1)
    else:
        idx, cnt = query_ball_point(radius, nsample, xyz2, xyz1)
        _, idx_knn = knn_point(nsample, xyz2, xyz1)
        cnt = tf.tile(tf.expand_dims(cnt, -1), [1,1,nsample])
        idx = tf.where(cnt > (nsample-1), idx, idx_knn)

    xyz2_grouped = group_point(xyz2, idx) # batch_size, npoint, nsample, 3
    xyz1_expanded = tf.expand_dims(xyz1, 2) # batch_size, npoint, 1, 3
    xyz_diff = xyz2_grouped - xyz1_expanded # batch_size, npoint, nsample, 3

    feat2_grouped = group_point(feat2, idx) # batch_size, npoint, nsample, channel
    feat1_expanded = tf.expand_dims(feat1, 2) # batch_size, npoint, 1, channel
    # TODO: change distance function
    if corr_func == 'elementwise_product':
        feat_diff = feat2_grouped * feat1_expanded # batch_size, npoint, nsample, channel
    elif corr_func == 'concat':
        feat_diff = tf.concat(axis=-1, values=[feat2_grouped, tf.tile(feat1_expanded,[1,1,nsample,1])]) # batch_size, npoint, nsample, channel*2
    elif corr_func == 'dot_product':
        feat_diff = tf.reduce_sum(feat2_grouped * feat1_expanded, axis=[-1], keep_dims=True) # batch_size, npoint, nsample, 1
    elif corr_func == 'cosine_dist':
        feat2_grouped = tf.nn.l2_normalize(feat2_grouped, -1)
        feat1_expanded = tf.nn.l2_normalize(feat1_expanded, -1)
        feat_diff = tf.reduce_sum(feat2_grouped * feat1_expanded, axis=[-1], keep_dims=True) # batch_size, npoint, nsample, 1
    elif corr_func == 'flownet_like': # assuming square patch size k = 0 as in the FlowNet paper
        batch_size = xyz1.get_shape()[0].value
        npoint = xyz1.get_shape()[1].value
        feat_diff = tf.reduce_sum(feat2_grouped * feat1_expanded, axis=[-1], keep_dims=True) # batch_size, npoint, nsample, 1
        total_diff = tf.concat(axis=-1, values=[xyz_diff, feat_diff]) # batch_size, npoint, nsample, 4
        feat1_new = tf.reshape(total_diff, [batch_size, npoint, -1]) # batch_size, npoint, nsample*4
        #feat1_new = tf.concat(axis=[-1], values=[feat1_new, feat1]) # batch_size, npoint, nsample*4+channel
        return xyz1, feat1_new


    feat1_new = tf.concat([feat_diff, xyz_diff], axis=3) # batch_size, npoint, nsample, [channel or 1] + 3
    # TODO: move scope to outer indent
    with tf.variable_scope(scope) as sc:
        for i, num_out_channel in enumerate(mlp):
            feat1_new = tf_util.conv2d(feat1_new, num_out_channel, [1,1],
                                       padding='VALID', stride=[1,1],
                                       bn=True, is_training=is_training,
                                       scope='conv_diff_%d'%(i), bn_decay=bn_decay)
    if pooling=='max':
        feat1_new = tf.reduce_max(feat1_new, axis=[2], keep_dims=False, name='maxpool_diff')
    elif pooling=='avg':
        feat1_new = tf.reduce_mean(feat1_new, axis=[2], keep_dims=False, name='avgpool_diff')
    return xyz1, feat1_new 
Developer ID: xingyul, Project: meteornet, Lines of code: 61, Source file: net_utils.py
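Note the hybrid neighbor search at the top of this example: when knn=False, query_ball_point can find fewer than nsample points inside the radius (the op typically pads by repeating indices), so the code substitutes kNN indices for any query whose ball is under-populated. A standalone sketch of that pattern, with hypothetical names, looks like this:

def robust_neighbor_idx(radius, nsample, points, queries):
    # Prefer ball-query neighbors; fall back to kNN wherever the ball
    # holds fewer than nsample points. (Illustrative sketch, not library code.)
    idx, cnt = query_ball_point(radius, nsample, points, queries)
    _, idx_knn = knn_point(nsample, points, queries)
    cnt = tf.tile(tf.expand_dims(cnt, -1), [1, 1, nsample])  # count per neighbor slot
    return tf.where(cnt > (nsample-1), idx, idx_knn)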

Example 8: set_upconv_module

# Required import: import tf_grouping [as alias]
# Or: from tf_grouping import knn_point [as alias]
def set_upconv_module(xyz1, xyz2, feat1, feat2, nsample, mlp, mlp2, is_training, scope, bn_decay=None, bn=True, pooling='max', radius=None, knn=True):
    """
        Feature propagation from xyz2 (less points) to xyz1 (more points)
    Inputs:
        xyz1: (batch_size, npoint1, 3)
        xyz2: (batch_size, npoint2, 3)
        feat1: (batch_size, npoint1, channel1) features for xyz1 points (earlier layers)
        feat2: (batch_size, npoint2, channel2) features for xyz2 points
    Output:
        feat1_new: (batch_size, npoint2, mlp[-1] or mlp2[-1] or channel1+3)
        TODO: Add support for skip links. Study how delta(XYZ) plays a role in feature updating.
    """
    with tf.variable_scope(scope) as sc:
        if knn:
            l2_dist, idx = knn_point(nsample, xyz2, xyz1)
        else:
            idx, pts_cnt = query_ball_point(radius, nsample, xyz2, xyz1)
        xyz2_grouped = group_point(xyz2, idx) # batch_size, npoint1, nsample, 3
        xyz1_expanded = tf.expand_dims(xyz1, 2) # batch_size, npoint1, 1, 3
        xyz_diff = xyz2_grouped - xyz1_expanded # batch_size, npoint1, nsample, 3

        feat2_grouped = group_point(feat2, idx) # batch_size, npoint1, nsample, channel2
        net = tf.concat([feat2_grouped, xyz_diff], axis=3) # batch_size, npoint1, nsample, channel2+3

        if mlp is None: mlp=[]
        for i, num_out_channel in enumerate(mlp):
            net = tf_util.conv2d(net, num_out_channel, [1,1],
                                 padding='VALID', stride=[1,1],
                                 bn=True, is_training=is_training,
                                 scope='conv%d'%(i), bn_decay=bn_decay)
        if pooling=='max':
            feat1_new = tf.reduce_max(net, axis=[2], keep_dims=False, name='maxpool') # batch_size, npoint1, mlp[-1]
        elif pooling=='avg':
            feat1_new = tf.reduce_mean(net, axis=[2], keep_dims=False, name='avgpool') # batch_size, npoint1, mlp[-1]

        if feat1 is not None:
            feat1_new = tf.concat([feat1_new, feat1], axis=2) # batch_size, npoint1, mlp[-1]+channel1

        feat1_new = tf.expand_dims(feat1_new, 2) # batch_size, npoint1, 1, mlp[-1]+channel1
        if mlp2 is None: mlp2=[]
        for i, num_out_channel in enumerate(mlp2):
            feat1_new = tf_util.conv2d(feat1_new, num_out_channel, [1,1],
                                       padding='VALID', stride=[1,1],
                                       bn=True, is_training=is_training,
                                       scope='post-conv%d'%(i), bn_decay=bn_decay)
        feat1_new = tf.squeeze(feat1_new, [2]) # batch_size, npoint1, mlp2[-1]
        return feat1_new 
Developer ID: xingyul, Project: meteornet, Lines of code: 49, Source file: net_utils.py

Example 9: point_rnn

# Required import: import tf_grouping [as alias]
# Or: from tf_grouping import knn_point [as alias]
def point_rnn(P1,
              P2,
              X1,
              S2,
              radius,
              nsample,
              out_channels,
              knn=False,
              pooling='max',
              scope='point_rnn'):
    """
    Input:
        P1:     (batch_size, npoint, 3)
        P2:     (batch_size, npoint, 3)
        X1:     (batch_size, npoint, feat_channels)
        S2:     (batch_size, npoint, out_channels)
    Output:
        S1:     (batch_size, npoint, out_channels)
    """
    # 1. Sample points
    if knn:
        _, idx = knn_point(nsample, P2, P1)
    else:
        idx, cnt = query_ball_point(radius, nsample, P2, P1)
        _, idx_knn = knn_point(nsample, P2, P1)
        cnt = tf.tile(tf.expand_dims(cnt, -1), [1, 1, nsample])
        idx = tf.where(cnt > (nsample-1), idx, idx_knn)

    # 2.1 Group P2 points
    P2_grouped = group_point(P2, idx)                       # batch_size, npoint, nsample, 3
    # 2.2 Group P2 states
    S2_grouped = group_point(S2, idx)                       # batch_size, npoint, nsample, out_channels

    # 3. Calculate displacements
    P1_expanded = tf.expand_dims(P1, 2)                     # batch_size, npoint, 1,       3
    displacement = P2_grouped - P1_expanded                 # batch_size, npoint, nsample, 3

    # 4. Concatenate X1, S2 and displacement
    if X1 is not None:
        X1_expanded = tf.tile(tf.expand_dims(X1, 2), [1, 1, nsample, 1])                # batch_size, npoint, nsample, feat_channels
        correlation = tf.concat([S2_grouped, X1_expanded], axis=3)                      # batch_size, npoint, nsample, feat_channels+out_channels
        correlation = tf.concat([correlation, displacement], axis=3)                    # batch_size, npoint, nsample, feat_channels+out_channels+3
    else:
        correlation = tf.concat([S2_grouped, displacement], axis=3)                     # batch_size, npoint, nsample, out_channels+3

    # 5. Fully-connected layer (the only parameters)
    with tf.variable_scope(scope) as sc:
        S1 = tf.layers.conv2d(inputs=correlation, filters=out_channels, kernel_size=1, strides=1, padding='valid', data_format='channels_last', activation=None, name='fc')

    # 6. Pooling
    if pooling=='max':
        return tf.reduce_max(S1, axis=[2], keepdims=False)
    elif pooling=='avg':
        return tf.reduce_mean(S1, axis=[2], keepdims=False) 
Developer ID: hehefan, Project: PointRNN, Lines of code: 56, Source file: pointrnn_cell_impl.py
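A hypothetical call, to make the tensor roles concrete (all shapes and values below are illustrative assumptions):

P1 = tf.random_uniform([4, 512, 3])    # point coordinates at time t
P2 = tf.random_uniform([4, 512, 3])    # point coordinates at time t-1
X1 = tf.random_uniform([4, 512, 64])   # point features at time t
S2 = tf.random_uniform([4, 512, 128])  # recurrent states at time t-1
S1 = point_rnn(P1, P2, X1, S2, radius=1.0, nsample=8, out_channels=128)
# S1: (4, 512, 128) updated states for the points at time t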

Example 10: get_model

# Required import: import tf_grouping [as alias]
# Or: from tf_grouping import knn_point [as alias]
def get_model(xyz, is_training, bn_decay=None, num_classes=40):
    batch_size = xyz.get_shape()[0].value
    num_point = xyz.get_shape()[1].value

    nsample = 20
    G = 16
    taylor_channel = 5
    
    with tf.variable_scope('delta') as sc:
        _, idx = knn_point(nsample, xyz, xyz)
        
        grouped_xyz = group_point(xyz, idx)   
        point_cloud_tile = tf.expand_dims(xyz, [2])
        point_cloud_tile = tf.tile(point_cloud_tile, [1, 1, nsample, 1])
        delta = grouped_xyz - point_cloud_tile

    with tf.variable_scope('fanConv1') as sc:
        feat_1 = tf_util.spiderConv(xyz, idx, delta, 32, taylor_channel = taylor_channel, 
                                        gn=True, G=G, is_multi_GPU=True)

    with tf.variable_scope('fanConv2') as sc:
        feat_2 = tf_util.spiderConv(feat_1, idx, delta, 64, taylor_channel = taylor_channel, 
                                        gn=True, G=G, is_multi_GPU=True)

    with tf.variable_scope('fanConv3') as sc:
        feat_3 = tf_util.spiderConv(feat_2, idx, delta, 128, taylor_channel = taylor_channel, 
                                        gn=True, G=G, is_multi_GPU=True)

    with tf.variable_scope('fanConv4') as sc:
        feat_4 = tf_util.spiderConv(feat_3, idx, delta, 256, taylor_channel = taylor_channel, 
                                        gn=True, G=G, is_multi_GPU=True)


    feat = tf.concat([feat_1, feat_2, feat_3, feat_4], 2)
    
    #top-k pooling
    net = tf_util.topk_pool(feat, k = 2, scope='topk_pool')
    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net, 1024, bn=True, is_training=is_training,
                                  scope='fc1', bn_decay=bn_decay, is_multi_GPU=True)
    net = tf_util.dropout(net, keep_prob=0.3, is_training=is_training,
                          scope='dp1')
    net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training,
                                  scope='fc2', bn_decay=bn_decay, is_multi_GPU=True)
    net = tf_util.dropout(net, keep_prob=0.3, is_training=is_training,
                          scope='dp2')
    net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')

    return net 
Developer ID: xyf513, Project: SpiderCNN, Lines of code: 51, Source file: spidercnn_cls_xyz.py

Example 11: get_model

# Required import: import tf_grouping [as alias]
# Or: from tf_grouping import knn_point [as alias]
def get_model(xyz_withnor, is_training, bn_decay=None, num_classes=40):
    batch_size = xyz_withnor.get_shape()[0].value
    num_point = xyz_withnor.get_shape()[1].value

    K_knn = 20
    taylor_channel = 3

    xyz = xyz_withnor[:, :, 0:3]
    
    with tf.variable_scope('delta') as sc:
        _, idx = knn_point(K_knn, xyz, xyz)
        
        grouped_xyz = group_point(xyz, idx)   
        point_cloud_tile = tf.expand_dims(xyz, [2])
        point_cloud_tile = tf.tile(point_cloud_tile, [1, 1, K_knn, 1])
        delta = grouped_xyz - point_cloud_tile

    with tf.variable_scope('SpiderConv1') as sc:
        feat_1 = tf_util.spiderConv(xyz_withnor, idx, delta, 32, taylor_channel = taylor_channel, 
                                        bn=True, is_training=is_training, bn_decay=bn_decay)

    with tf.variable_scope('SpiderConv2') as sc:
        feat_2 = tf_util.spiderConv(feat_1, idx, delta, 64, taylor_channel = taylor_channel, 
                                        bn=True, is_training=is_training, bn_decay=bn_decay)

    with tf.variable_scope('SpiderConv3') as sc:
        feat_3 = tf_util.spiderConv(feat_2, idx, delta, 128, taylor_channel = taylor_channel, 
                                        bn=True, is_training=is_training, bn_decay=bn_decay)

    with tf.variable_scope('SpiderConv4') as sc:
        feat_4 = tf_util.spiderConv(feat_3, idx, delta, 256, taylor_channel = taylor_channel, 
                                        bn=True, is_training=is_training, bn_decay=bn_decay)


    feat = tf.concat([feat_1, feat_2, feat_3, feat_4], 2)

    #top-k pooling
    net = tf_util.topk_pool(feat, k = 2, scope='topk_pool')
    
    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training,
                                  scope='fc1', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training,
                          scope='dp1')
    net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training,
                                  scope='fc2', bn_decay=bn_decay)
    net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training,
                          scope='dp2')
    net = tf_util.fully_connected(net, 40, activation_fn=None, scope='fc3')

    return net 
Developer ID: xyf513, Project: SpiderCNN, Lines of code: 53, Source file: spidercnn_cls.py

Example 12: get_model

# Required import: import tf_grouping [as alias]
# Or: from tf_grouping import knn_point [as alias]
def get_model(xyz, is_training, bn_decay=None, num_class=NUM_CLASSES):
    batch_size = xyz.get_shape()[0].value
    num_point = xyz.get_shape()[1].value

    nsample = 20
    G = 16
    taylor_channel = 5
    
    with tf.variable_scope('delta') as sc:
        _, idx = knn_point(nsample, xyz, xyz)
        
        grouped_xyz = group_point(xyz, idx)   
        point_cloud_tile = tf.expand_dims(xyz, [2])
        point_cloud_tile = tf.tile(point_cloud_tile, [1, 1, nsample, 1])
        delta = grouped_xyz - point_cloud_tile

    with tf.variable_scope('fanConv1') as sc:
        feat_1 = tf_util.spiderConv(xyz, idx, delta, 32, taylor_channel = taylor_channel, 
                                        gn=True, G=G, is_multi_GPU=True)

    with tf.variable_scope('fanConv2') as sc:
        feat_2 = tf_util.spiderConv(feat_1, idx, delta, 64, taylor_channel = taylor_channel, 
                                        gn=True, G=G, is_multi_GPU=True)

    with tf.variable_scope('fanConv3') as sc:
        feat_3 = tf_util.spiderConv(feat_2, idx, delta, 128, taylor_channel = taylor_channel, 
                                        gn=True, G=G, is_multi_GPU=True)

    with tf.variable_scope('fanConv4') as sc:
        feat_4 = tf_util.spiderConv(feat_3, idx, delta, 256, taylor_channel = taylor_channel, 
                                        gn=True, G=G, is_multi_GPU=True)


    feat = tf.concat([feat_1, feat_2, feat_3, feat_4], 2)
    
    #top-k pooling
    net = tf_util.topk_pool(feat, k = 2, scope='topk_pool')
    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net, 1024, bn=True, is_training=is_training,
                                  scope='fc1', bn_decay=bn_decay, is_multi_GPU=True)
    net = tf_util.dropout(net, keep_prob=0.3, is_training=is_training,
                          scope='dp1')
    net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training,
                                  scope='fc2', bn_decay=bn_decay, is_multi_GPU=True)
    net = tf_util.dropout(net, keep_prob=0.3, is_training=is_training,
                          scope='dp2')
    net = tf_util.fully_connected(net, num_class, activation_fn=None, scope='fc3')

    return net 
Developer ID: hkust-vgd, Project: scanobjectnn, Lines of code: 51, Source file: spidercnn_cls_xyz.py

Example 13: set_upconv_module

# Required import: import tf_grouping [as alias]
# Or: from tf_grouping import knn_point [as alias]
def set_upconv_module(xyz1, xyz2, feat1, feat2, nsample, mlp, mlp2, is_training, scope, bn_decay=None, bn=True, pooling='max', radius=None, knn=True):
    """
        Feature propagation from xyz2 (less points) to xyz1 (more points)

    Inputs:
        xyz1: (batch_size, npoint1, 3)
        xyz2: (batch_size, npoint2, 3)
        feat1: (batch_size, npoint1, channel1) features for xyz1 points (earlier layers)
        feat2: (batch_size, npoint2, channel2) features for xyz2 points
    Output:
        feat1_new: (batch_size, npoint2, mlp[-1] or mlp2[-1] or channel1+3)

        TODO: Add support for skip links. Study how delta(XYZ) plays a role in feature updating.
    """
    with tf.variable_scope(scope) as sc:
        if knn:
            l2_dist, idx = knn_point(nsample, xyz2, xyz1)
        else:
            idx, pts_cnt = query_ball_point(radius, nsample, xyz2, xyz1)
        xyz2_grouped = group_point(xyz2, idx) # batch_size, npoint1, nsample, 3
        xyz1_expanded = tf.expand_dims(xyz1, 2) # batch_size, npoint1, 1, 3
        xyz_diff = xyz2_grouped - xyz1_expanded # batch_size, npoint1, nsample, 3

        feat2_grouped = group_point(feat2, idx) # batch_size, npoint1, nsample, channel2
        net = tf.concat([feat2_grouped, xyz_diff], axis=3) # batch_size, npoint1, nsample, channel2+3

        if mlp is None: mlp=[]
        for i, num_out_channel in enumerate(mlp):
            net = tf_util.conv2d(net, num_out_channel, [1,1],
                                 padding='VALID', stride=[1,1],
                                 bn=True, is_training=is_training,
                                 scope='conv%d'%(i), bn_decay=bn_decay)
        if pooling=='max':
            feat1_new = tf.reduce_max(net, axis=[2], keep_dims=False, name='maxpool') # batch_size, npoint1, mlp[-1]
        elif pooling=='avg':
            feat1_new = tf.reduce_mean(net, axis=[2], keep_dims=False, name='avgpool') # batch_size, npoint1, mlp[-1]

        if feat1 is not None:
            feat1_new = tf.concat([feat1_new, feat1], axis=2) # batch_size, npoint1, mlp[-1]+channel1

        feat1_new = tf.expand_dims(feat1_new, 2) # batch_size, npoint1, 1, mlp[-1]+channel1
        if mlp2 is None: mlp2=[]
        for i, num_out_channel in enumerate(mlp2):
            feat1_new = tf_util.conv2d(feat1_new, num_out_channel, [1,1],
                                       padding='VALID', stride=[1,1],
                                       bn=True, is_training=is_training,
                                       scope='post-conv%d'%(i), bn_decay=bn_decay)
        feat1_new = tf.squeeze(feat1_new, [2]) # batch_size, npoint1, mlp2[-1]
        return feat1_new 
Developer ID: xingyul, Project: flownet3d, Lines of code: 51, Source file: pointnet_util.py

Example 14: sample_and_group

# Required import: import tf_grouping [as alias]
# Or: from tf_grouping import knn_point [as alias]
def sample_and_group(npoint, radius, nsample, xyz, points, tnet_spec=None, knn=False, use_xyz=True, centralize_points=False):
    '''
    Input:
        npoint: int32
        radius: float32
        nsample: int32
        xyz: (batch_size, ndataset, 3) TF tensor
        points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points
        tnet_spec: dict (keys: mlp, mlp2, is_training, bn_decay), if None do not apply tnet
        knn: bool, if True use kNN instead of radius search
        use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features
    Output:
        new_xyz: (batch_size, npoint, 3) TF tensor
        new_points: (batch_size, npoint, nsample, 3+channel) TF tensor
        idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points
        grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs
            (subtracted by seed point XYZ) in local regions
    '''

    fpsidx = farthest_point_sample(npoint, xyz)
    new_xyz = gather_point(xyz, fpsidx) # (batch_size, npoint, 3)
    if knn:
        _,idx = knn_point(nsample, xyz, new_xyz)
    else:
        idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)
    grouped_xyz = group_point(xyz, idx) # (batch_size, npoint, nsample, 3)
    grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2), [1,1,nsample,1]) # translation normalization
    if tnet_spec is not None:
        grouped_xyz = tnet(grouped_xyz, tnet_spec)
    if points is not None:
        grouped_points = group_point(points, idx) # (batch_size, npoint, nsample, channel)

        if centralize_points:
            central_points = gather_point(points[:,:,:3], fpsidx)
            grouped_points = tf.concat((grouped_points[:,:,:,:3]-tf.tile(tf.expand_dims(central_points, 2), [1,1,nsample,1]),grouped_points[:,:,:,3:]),-1)

        if use_xyz:
            new_points = tf.concat([grouped_xyz, grouped_points], axis=-1) # (batch_size, npoint, nsample, 3+channel)
        else:
            new_points = grouped_points
    else:
        new_points = grouped_xyz

    return new_xyz, new_points, idx, grouped_xyz 
Developer ID: ericyi, Project: articulated-part-induction, Lines of code: 46, Source file: pointnet_util.py


Note: The tf_grouping.knn_point method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors. Consult each project's license before distributing or using the code, and do not reproduce without permission.