

Python DataArray.drop method code examples

This article collects typical usage examples of the Python method xarray.DataArray.drop. If you are unsure what DataArray.drop does, how to call it, or when to use it, the curated examples below may help. You can also explore further usage examples of xarray.DataArray, the class this method belongs to.


Three code examples of the DataArray.drop method are shown below, ordered by popularity.
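
Before the examples, here is a minimal, self-contained sketch of the call pattern they all rely on: DataArray.drop(labels, dim=...) removes the given index labels along one dimension and returns a smaller array. (In recent xarray releases this spelling is deprecated in favour of DataArray.drop_sel; the labels and values below are made up purely for illustration.)

import numpy as np
from xarray import DataArray

# a 3x3 matrix indexed by cluster labels along dimensions 'i' and 'j'
da = DataArray(np.arange(9.).reshape(3, 3),
               [('i', ['A', 'B', 'C']), ('j', ['A', 'B', 'C'])])

# drop label 'B' along both dimensions, leaving a 2x2 array
smaller = da.drop('B', dim='i').drop('B', dim='j')
print(smaller.coords['i'].values)  # ['A' 'C']
print(smaller.shape)               # (2, 2)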

Example 1: DoNotCooccur

# Required import: from xarray import DataArray  [as alias]
# Or: from xarray.DataArray import drop  [as alias]
# (this snippet also assumes: import numpy as np, and the HACConstraint base class from pyannote-algorithms)
class DoNotCooccur(HACConstraint):
    """Do NOT merge co-occurring face tracks"""

    def initialize(self, parent=None):

        current_state = parent.current_state
        clusters = [cluster for cluster in current_state.labels()]
        n_clusters = len(clusters)

        self._cooccur = DataArray(
            np.zeros((n_clusters, n_clusters)),
            [('i', clusters), ('j', clusters)])

        for (segment1, track1), (segment2, track2) in current_state.co_iter(current_state):
            i = current_state[segment1, track1]
            j = current_state[segment2, track2]
            if i == j:
                continue
            self._cooccur.loc[i, j] = 1
            self._cooccur.loc[j, i] = 1

    def mergeable(self, clusters, parent=None):
        return self._cooccur.loc[clusters, clusters].sum().item() == 0.

    def update(self, merged_clusters, new_cluster, parent=None):

        # clusters that will be removed
        _clusters = list(set(merged_clusters) - set([new_cluster]))

        # update co-occurrence matrix
        self._cooccur.loc[new_cluster, :] += self._cooccur.loc[_clusters, :].sum(dim='i')
        self._cooccur.loc[:, new_cluster] += self._cooccur.loc[:, _clusters].sum(dim='j')

        # remove clusters
        self._cooccur = self._cooccur.drop(_clusters, dim='i').drop(_clusters, dim='j')
Developer: pyannote · Project: pyannote-algorithms · Lines: 37 · Source: constraint.py
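
The update pattern in example 1 can be isolated: fold the rows and columns of the removed clusters into the surviving cluster, then drop the stale labels along both dimensions with DataArray.drop. A minimal sketch of that pattern (cluster names and co-occurrence values are made up):

import numpy as np
from xarray import DataArray

clusters = ['A', 'B', 'C']
cooccur = DataArray(np.zeros((3, 3)),
                    [('i', clusters), ('j', clusters)])
cooccur.loc['B', 'C'] = 1
cooccur.loc['C', 'B'] = 1

# merge 'B' into 'A': fold the removed row/column into the surviving one...
removed = ['B']
cooccur.loc['A', :] += cooccur.loc[removed, :].sum(dim='i')
cooccur.loc[:, 'A'] += cooccur.loc[:, removed].sum(dim='j')

# ...then drop the stale labels along both dimensions
cooccur = cooccur.drop(removed, dim='i').drop(removed, dim='j')
print(cooccur.loc['A', 'C'].item())  # 1.0 -- 'A' inherits B's co-occurrence with 'C'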

Example 2: CloseInTime

# Required import: from xarray import DataArray  [as alias]
# Or: from xarray.DataArray import drop  [as alias]
# (this snippet also assumes: import numpy as np, Segment from pyannote.core,
#  connected_components -- presumably from scipy.sparse.csgraph -- and the HACConstraint base class)
class CloseInTime(HACConstraint):

    def __init__(self, closer_than=30.0):
        super(CloseInTime, self).__init__()
        self.closer_than = closer_than

    def initialize(self, parent=None):

        current_state = parent.current_state
        extended = current_state.empty()
        for segment, track, cluster in current_state.itertracks(label=True):
            extended_segment = Segment(segment.start - 0.5 * self.closer_than,
                                       segment.end + 0.5 * self.closer_than)
            extended[extended_segment, track] = cluster

        clusters = [cluster for cluster in current_state.labels()]
        n_clusters = len(clusters)

        self._neighbours = DataArray(
            np.zeros((n_clusters, n_clusters)),
            [('i', clusters), ('j', clusters)])

        for (segment1, track1), (segment2, track2) in extended.co_iter(extended):
            i = extended[segment1, track1]
            j = extended[segment2, track2]
            self._neighbours.loc[i, j] += 1
            self._neighbours.loc[j, i] += 1

    def update(self, merged_clusters, new_cluster, parent=None):

        # clusters that will be removed
        _clusters = list(set(merged_clusters) - set([new_cluster]))

        # update neighbourhood (extended co-occurrence) matrix
        self._neighbours.loc[new_cluster, :] += self._neighbours.loc[_clusters, :].sum(dim='i')
        self._neighbours.loc[:, new_cluster] += self._neighbours.loc[:, _clusters].sum(dim='j')

        # remove clusters
        self._neighbours = self._neighbours.drop(_clusters, dim='i').drop(_clusters, dim='j')

    def mergeable(self, clusters, parent=None):
        return connected_components(
            self._neighbours.loc[clusters, clusters],
            directed=False, return_labels=False) == 1
Developer: pyannote · Project: pyannote-algorithms · Lines: 46 · Source: constraint.py
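
Example 2's mergeable check treats the neighbourhood matrix as an undirected graph and only allows a merge when the selected clusters form a single connected component. A stand-alone sketch of that check (cluster names and counts are made up; connected_components is assumed to come from scipy.sparse.csgraph):

import numpy as np
from scipy.sparse.csgraph import connected_components
from xarray import DataArray

clusters = ['A', 'B', 'C']
neighbours = DataArray(np.zeros((3, 3)),
                       [('i', clusters), ('j', clusters)])
neighbours.loc['A', 'B'] = 1   # A and B overlap in (extended) time
neighbours.loc['B', 'A'] = 1   # C overlaps with neither

# {A, B} form one connected component, so they are mergeable...
print(connected_components(neighbours.loc[['A', 'B'], ['A', 'B']].values,
                           directed=False, return_labels=False) == 1)  # True

# ...but {A, B, C} form two components, so the three are not
print(connected_components(neighbours.loc[['A', 'B', 'C'], ['A', 'B', 'C']].values,
                           directed=False, return_labels=False) == 1)  # False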

Example 3: HACModel

# Required import: from xarray import DataArray  [as alias]
# Or: from xarray.DataArray import drop  [as alias]
# (this snippet also assumes: import numpy as np, product from itertools, and the HACModel base class)

#......... part of this example's code is omitted here .........
                    # compute similarity if (and only if) clusters are mergeable
                    if not parent.constraint.mergeable([i, j], parent=parent):
                        continue

                    similarity = self.compute_similarity(i, j, parent=parent)

                    self._similarity.loc[i, j] = similarity
                    self._similarity.loc[j, i] = similarity
            else:
                for i, j in product(clusters, repeat=2):

                    # compute similarity if (and only if) clusters are mergeable
                    if not parent.constraint.mergeable([i, j], parent=parent):
                        continue

                    similarity = self.compute_similarity(i, j, parent=parent)

                    self._similarity.loc[i, j] = similarity

    # NOTE - for now this (get_candidates / block) combination assumes
    # that we merge clusters two-by-two...

    def get_candidates(self, parent=None):
        """
        Returns
        -------
        clusters : tuple
        similarity : float

        """
        _, n_j = self._similarity.shape
        ij = np.argmax(self._similarity.data)
        i = ij // n_j
        j = ij % n_j

        similarity = self._similarity[i, j].item()
        clusters = [self._similarity.coords['i'][i].item(),
                    self._similarity.coords['j'][j].item()]

        return clusters, similarity

    def block(self, clusters, parent=None):
        if len(clusters) > 2:
            raise NotImplementedError(
                'Constrained clustering merging 3+ clusters is not supported.'
            )
        i, j = clusters
        self._similarity.loc[i, j] = -np.inf
        self._similarity.loc[j, i] = -np.inf

    def update(self, merged_clusters, into, parent=None):

        # compute merged model
        self._models[into] = self.compute_merged_model(merged_clusters,
                                                       parent=parent)

        # remove old models and corresponding similarity
        removed_clusters = list(set(merged_clusters) - set([into]))
        for cluster in removed_clusters:
            del self._models[cluster]
        self._similarity = self._similarity.drop(removed_clusters, dim='i').drop(removed_clusters, dim='j')

        # compute new similarities
        # * all at once if model implements compute_similarities
        # * one by one otherwise

        remaining_clusters = list(set(self._models) - set([into]))

        try:

            if remaining_clusters:
                # all at once (when available)
                similarity = self.compute_similarities(
                    into, remaining_clusters, dim='j', parent=parent)
                self._similarity.loc[into, remaining_clusters] = similarity
                if self.is_symmetric:
                    similarity = similarity.rename({'j': 'i'})
                else:
                    similarity = self.compute_similarities(
                        into, remaining_clusters, dim='i', parent=parent)
                self._similarity.loc[remaining_clusters, into] = similarity

        except NotImplementedError as e:

            if remaining_clusters:
                self._similarity.loc[into, remaining_clusters] = -np.inf
                self._similarity.loc[remaining_clusters, into] = -np.inf

            for cluster in remaining_clusters:

                # compute similarity if (and only if) clusters are mergeable
                if parent.constraint.mergeable([into, cluster], parent=parent):
                    similarity = self.compute_similarity(
                        into, cluster, parent=parent)
                    self._similarity.loc[into, cluster] = similarity

                    if not self.is_symmetric:
                        similarity = self.compute_similarity(
                            cluster, into, parent=parent)
                    self._similarity.loc[cluster, into] = similarity
Developer: pyannote · Project: pyannote-algorithms · Lines: 104 · Source: model.py
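
The get_candidates method above relies on a standard NumPy idiom: take the argmax over the flattened similarity matrix, then recover the row/column pair with integer division and modulo (np.unravel_index would do the same job). A stand-alone illustration with made-up values:

import numpy as np

similarity = np.array([[-np.inf, 0.2, 0.7],
                       [0.2, -np.inf, 0.1],
                       [0.7, 0.1, -np.inf]])

_, n_j = similarity.shape
ij = np.argmax(similarity)      # flat index of the largest entry
i, j = ij // n_j, ij % n_j      # back to (row, column)
print(i, j, similarity[i, j])   # 0 2 0.7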


Note: the xarray.DataArray.drop examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their authors; copyright remains with the original authors, and any redistribution or reuse should follow the corresponding project's license. Do not reproduce without permission.