

Python vq.vq Method Code Examples

This article collects typical usage examples of the scipy.cluster.vq.vq method in Python. If you are wondering how exactly to use vq.vq, how to call it, or what working examples look like, the curated code examples below may help. You can also explore further usage examples of the module it belongs to, scipy.cluster.vq.


A total of 15 code examples of the vq.vq method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
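Before the project excerpts, here is a minimal, self-contained sketch of the basic pattern they all share: learn a codebook with kmeans, then call vq(obs, code_book), which returns the index of the nearest codebook entry for each observation together with its distance. The data and variable names below are illustrative only and are not taken from any of the projects cited later.

# Minimal sketch: cluster synthetic 2-D points, then assign each point to its
# nearest centroid with vq. All names here are illustrative.
import numpy as np
from scipy.cluster.vq import whiten, kmeans, vq

rng = np.random.default_rng(0)
points = rng.normal(size=(200, 2))          # synthetic observations
whitened = whiten(points)                   # rescale each feature to unit variance
codebook, distortion = kmeans(whitened, 3)  # learn 3 centroids
labels, dists = vq(whitened, codebook)      # labels[i] = index of nearest centroid
print(labels[:10], dists[:10])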

Example 1: Kmeans

# Required import: from scipy.cluster import vq [as alias]
# Or: from scipy.cluster.vq import vq [as alias]
def Kmeans(file, vocabfile, k):
  # embeddings, word_id, id_word, representatives and l2_nearest are defined
  # outside this excerpt in knn.py; file and vocabfile are not used here.
  np.random.seed((1000,2000))
  whitened = whiten(embeddings)
  codebook, distortion = kmeans(whitened, k)
  clusters = [l2_nearest(embeddings, c, representatives+1) for c in codebook]
  # output
  print(len(codebook), distortion)
  for centroid in codebook:
    print(' '.join([str(x) for x in centroid]))
  print()
  for cluster in clusters:
    print(' '.join([id_word[i] for i, d in cluster]).encode('utf-8'))
  print()
  # assign clusters to words
  codes, _ = vq(embeddings, codebook)
  for w, c in zip(word_id.keys(), codes):
    print(w, c) 
Author: attardi | Project: deepnl | Lines of code: 19 | Source file: knn.py

Example 2: test_vq

# Required import: from scipy.cluster import vq [as alias]
# Or: from scipy.cluster.vq import vq [as alias]
def test_vq(self):
        initc = np.concatenate(([[X[0]], [X[1]], [X[2]]]))
        if TESTC:
            label1, dist = _vq.vq(X, initc)
            assert_array_equal(label1, LABEL1)
            tlabel1, tdist = vq(X, initc)
        else:
            print("== not testing C imp of vq ==")

    #def test_py_vq_1d(self):
    #    """Test special rank 1 vq algo, python implementation."""
    #    data = X[:, 0]
    #    initc = data[:3]
    #    a, b = _py_vq_1d(data, initc)
    #    ta, tb = py_vq(data[:, np.newaxis], initc[:, np.newaxis])
    #    assert_array_equal(a, ta)
    #    assert_array_equal(b, tb) 
Author: ktraunmueller | Project: Computable | Lines of code: 19 | Source file: test_vq.py

Example 3: python_vq

# Required import: from scipy.cluster import vq [as alias]
# Or: from scipy.cluster.vq import vq [as alias]
def python_vq(all_data,code_book):
    import time
    t1 = time.time()
    codes1,dist1 = vq.vq(all_data,code_book)
    t2 = time.time()
    #print 'fast (double):', t2 - t1
    #print '  first codes:', codes1[:5]
    #print '  first dist:', dist1[:5]
    #print '  last codes:', codes1[-5:]
    #print '  last dist:', dist1[-5:]
    float_obs = all_data.astype(np.float32)
    float_code = code_book.astype(np.float32)
    t1 = time.time()
    codes1,dist1 = vq.vq(float_obs,float_code)
    t2 = time.time()
    #print 'fast (float):', t2 - t1
    #print '  first codes:', codes1[:5]
    #print '  first dist:', dist1[:5]
    #print '  last codes:', codes1[-5:]
    #print '  last dist:', dist1[-5:]

    return codes1,dist1 
Author: ktraunmueller | Project: Computable | Lines of code: 24 | Source file: vq_test.py

Example 4: cluster_lon_lats

# Required import: from scipy.cluster import vq [as alias]
# Or: from scipy.cluster.vq import vq [as alias]
def cluster_lon_lats(self):
        """Clusters the list of lon_lats into groups """
        np_lon_lats = []
        for lon_lat in self.lon_lats:
            dpoint = np.fromiter(lon_lat, np.dtype('float'))
            np_lon_lats.append(dpoint)
        data = array(np_lon_lats)
        centroids, _ = kmeans(data, self.number_clusters)
        idx, _ = vq(data, centroids)
        self.idx = idx
        self.data = data
        self.centroids = centroids
        # Sort the centroids by lon, then lat; the second argsort must be
        # stable so the lat ordering is kept as a tie-breaker.
        sc = centroids[centroids[:,1].argsort()]
        sc = sc[sc[:,0].argsort(kind='stable')]
        self.sorted_centroids = sc.tolist() 
Author: ekansa | Project: open-context-py | Lines of code: 18 | Source file: clustergeojson.py

Example 5: encode

# Required import: from scipy.cluster import vq [as alias]
# Or: from scipy.cluster.vq import vq [as alias]
def encode(self, vecs):
        """Encode input vectors into PQ-codes.

        Args:
            vecs (np.ndarray): Input vectors with shape=(N, D) and dtype=np.float32.

        Returns:
            np.ndarray: PQ codes with shape=(N, M) and dtype=self.code_dtype

        """
        assert vecs.dtype == np.float32
        assert vecs.ndim == 2
        N, D = vecs.shape
        assert D == self.Ds * self.M, "input dimension must be Ds * M"

        # codes[n][m] : code of n-th vec, m-th subspace
        codes = np.empty((N, self.M), dtype=self.code_dtype)
        for m in range(self.M):
            if self.verbose:
                print("Encoding the subspace: {} / {}".format(m, self.M))
            vecs_sub = vecs[:, m * self.Ds : (m+1) * self.Ds]
            codes[:, m], _ = vq(vecs_sub, self.codewords[m])

        return codes 
Author: matsui528 | Project: nanopq | Lines of code: 26 | Source file: pq.py
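As a usage sketch for the encode method above: assuming nanopq's documented PQ(M=...) constructor with fit() and encode() (the project this example comes from), and with illustrative sizes, the per-subspace vq calls are typically driven like this:

# Hedged usage sketch; assumes nanopq's PQ class with fit()/encode().
import numpy as np
import nanopq

X = np.random.rand(1000, 128).astype(np.float32)  # N=1000 vectors, D=128
pq = nanopq.PQ(M=8)         # 8 subspaces, so Ds = 128 / 8 = 16
pq.fit(X)                   # learns pq.codewords with k-means per subspace
codes = pq.encode(X)        # shape (1000, 8); one vq() call per subspace
print(codes.shape, codes.dtype)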

Example 6: apply_palette

# Required import: from scipy.cluster import vq [as alias]
# Or: from scipy.cluster.vq import vq [as alias]
def apply_palette(img, palette, options):

    '''Apply the palette to the given image. The first step is to set all
    background pixels to the background color; then, nearest-neighbor
    matching is used to map each foreground color to the closest one in
    the palette.
    '''

    if not options.quiet:
        print('  applying palette...')

    bg_color = palette[0]

    fg_mask = get_fg_mask(bg_color, img, options)

    orig_shape = img.shape

    pixels = img.reshape((-1, 3))
    fg_mask = fg_mask.flatten()

    num_pixels = pixels.shape[0]

    labels = np.zeros(num_pixels, dtype=np.uint8)

    labels[fg_mask], _ = vq(pixels[fg_mask], palette)

    return labels.reshape(orig_shape[:-1])

###################################################################### 
Author: mzucker | Project: noteshrink | Lines of code: 32 | Source file: noteshrink.py
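The nearest-neighbor step in apply_palette reduces to a single vq call against the palette. A minimal, self-contained sketch with synthetic pixels and a made-up palette (not noteshrink's actual data or options):

# Minimal sketch of palette mapping with vq, using synthetic RGB data.
import numpy as np
from scipy.cluster.vq import vq

pixels = np.random.randint(0, 256, size=(10, 3)).astype(float)   # fake RGB pixels
palette = np.array([[0, 0, 0], [255, 255, 255], [200, 30, 30]], dtype=float)
labels, _ = vq(pixels, palette)   # labels[i] = index of the closest palette color
print(labels)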

Example 7: test_vq_1d

# Required import: from scipy.cluster import vq [as alias]
# Or: from scipy.cluster.vq import vq [as alias]
def test_vq_1d(self):
        """Test special rank 1 vq algo, python implementation."""
        data = X[:, 0]
        initc = data[:3]
        if TESTC:
            a, b = _vq.vq(data, initc)
            ta, tb = py_vq(data[:, np.newaxis], initc[:, np.newaxis])
            assert_array_equal(a, ta)
            assert_array_equal(b, tb)
        else:
            print("== not testing C imp of vq (rank 1) ==") 
Author: ktraunmueller | Project: Computable | Lines of code: 13 | Source file: test_vq.py

Example 8: test__vq_sametype

# Required import: from scipy.cluster import vq [as alias]
# Or: from scipy.cluster.vq import vq [as alias]
def test__vq_sametype(self):
        if TESTC:
            a = np.array([1, 2])
            b = a.astype(float)
            assert_raises(ValueError, _vq.vq, a, b) 
Author: ktraunmueller | Project: Computable | Lines of code: 7 | Source file: test_vq.py

Example 9: run_kmeans

# Required import: from scipy.cluster import vq [as alias]
# Or: from scipy.cluster.vq import vq [as alias]
def run_kmeans(self, X, K):
        """Runs k-means and returns the labels assigned to the data."""
        wX = vq.whiten(X)
        means, dist = vq.kmeans(wX, K, iter=100)
        labels, dist = vq.vq(wX, means)
        return means, labels 
Author: urinieto | Project: msaf | Lines of code: 8 | Source file: xmeans.py

Example 10: compute_bic

# Required import: from scipy.cluster import vq [as alias]
# Or: from scipy.cluster.vq import vq [as alias]
def compute_bic(self, D, means, labels, K, R):
        """Computes the Bayesian Information Criterion."""
        D = vq.whiten(D)
        Rn = D.shape[0]
        M = D.shape[1]

        if R == K:
            return 1

        # Maximum likelihood estimate (MLE)
        mle_var = 0
        for k in range(len(means)):
            X = D[np.argwhere(labels == k)]
            X = X.reshape((X.shape[0], X.shape[-1]))
            for x in X:
                mle_var += distance.euclidean(x, means[k])
                #print x, means[k], mle_var
        mle_var /= float(R - K)

        # Log-likelihood of the data
        l_D = - Rn/2. * np.log(2*np.pi) - (Rn * M)/2. * np.log(mle_var) - \
            (Rn - K) / 2. + Rn * np.log(Rn) - Rn * np.log(R)

        # Params of BIC
        p = (K-1) + M * K + mle_var

        #print "BIC:", l_D, p, R, K

        # Return the bic
        return l_D - p / 2. * np.log(R) 
Author: urinieto | Project: msaf | Lines of code: 32 | Source file: xmeans.py
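For reference, the quantities computed in compute_bic can be written out; this is a direct transcription of the code above (with $R_n$ = D.shape[0], $M$ = D.shape[1], and $\hat{\sigma}$ the value accumulated in mle_var), not a claim about the textbook X-means BIC:

$$\hat{\sigma} = \frac{1}{R-K}\sum_{k}\sum_{x \in X_k}\lVert x - \mu_k\rVert$$
$$\log L(D) = -\frac{R_n}{2}\log(2\pi) - \frac{R_n M}{2}\log\hat{\sigma} - \frac{R_n-K}{2} + R_n\log R_n - R_n\log R$$
$$p = (K-1) + MK + \hat{\sigma}, \qquad \mathrm{BIC} = \log L(D) - \frac{p}{2}\log R$$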

Example 11: test_kmeans

# Required import: from scipy.cluster import vq [as alias]
# Or: from scipy.cluster.vq import vq [as alias]
def test_kmeans(K=5):
    """Test k-means with the synthetic data."""
    X = XMeans.generate_2d_data(K=4)
    wX = vq.whiten(X)
    dic, dist = vq.kmeans(wX, K, iter=100)

    plt.scatter(wX[:, 0], wX[:, 1])
    plt.scatter(dic[:, 0], dic[:, 1], color="m")
    plt.show() 
Author: urinieto | Project: msaf | Lines of code: 11 | Source file: xmeans.py

Example 12: quantize

# Required import: from scipy.cluster import vq [as alias]
# Or: from scipy.cluster.vq import vq [as alias]
def quantize(self):
        clusters = range(self.centroids.shape[0] + 1)
        histograms = {}
        for fname in sorted(self.data.keys()):
            if self.data[fname] is None: continue
            idx,_ = vq(self.data[fname], self.centroids)
            # np.histogram's `normed` argument was removed in newer NumPy;
            # `density` is the current equivalent of the original call.
            histograms[fname], _ = np.histogram(idx, bins=clusters, density=self.normalize)
        return histograms 
Author: douwekiela | Project: mmfeat | Lines of code: 10 | Source file: bow.py
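The quantize method above is the standard bag-of-visual-words step: map each descriptor to its nearest centroid with vq, then histogram the indices. A minimal, self-contained sketch with synthetic descriptors (names and sizes are illustrative, not mmfeat's API):

# Minimal bag-of-visual-words sketch with synthetic descriptors.
import numpy as np
from scipy.cluster.vq import kmeans, vq

descriptors = np.random.rand(500, 64)            # e.g. local image descriptors
centroids, _ = kmeans(descriptors, 20)           # visual vocabulary of 20 words
idx, _ = vq(descriptors, centroids)              # codeword index per descriptor
hist, _ = np.histogram(idx, bins=range(centroids.shape[0] + 1), density=True)
print(hist)                                      # normalized BoW vector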

Example 13: sequences

# Required import: from scipy.cluster import vq [as alias]
# Or: from scipy.cluster.vq import vq [as alias]
def sequences(self):
        sequences = {}
        for fname in sorted(self.data.keys()):
            if self.data[fname] is None: continue
            idx,_ = vq(self.data[fname], self.centroids)
            sequences[fname] = idx
        return sequences 
Author: douwekiela | Project: mmfeat | Lines of code: 9 | Source file: bow.py

Example 14: make_bow

# Required import: from scipy.cluster import vq [as alias]
# Or: from scipy.cluster.vq import vq [as alias]
def make_bow(dataset, clusters, tfidf):
    print("Make bow vector for each frame")

    n_videos = len(dataset)

    bow = np.zeros((n_videos, clusters.shape[0]), dtype=np.float)

    # Make bow vectors for all videos.
    video_index = 0
    for video in dataset:
        visual_word_ids = vq(video["features"], clusters)[0]
        for word_id in visual_word_ids:
            bow[video_index, word_id] += 1
        video_index += 1

    # Check whether to use TF-IDF weighting.
    if tfidf:
        print("Applying TF-IDF weighting")
        freq = np.sum((bow > 0) * 1, axis = 0)
        idf = np.log((n_videos + 1) / (freq + 1))
        bow = bow * idf

    # Replace features in dataset with the bow vector we've computed.
    video_index = 0
    for i in range(len(dataset)):

        dataset[i]["features"] = bow[video_index]
        video_index += 1

        if (i + 1) % 50 == 0:
            print("Processed %d/%d videos" % (i + 1, len(dataset)))

    return dataset 
Author: vkhoi | Project: KTH-Action-Recognition | Lines of code: 35 | Source file: make_bow_vector.py
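Written out, the smoothed TF-IDF weighting applied above (and in the variant below) is, transcribed directly from the code, with $N$ = n_videos here (n_frames below) and $\mathrm{df}_j$ the number of rows in which visual word $j$ occurs:

$$\mathrm{idf}_j = \log\frac{N+1}{\mathrm{df}_j+1}, \qquad \mathrm{bow}_{ij} \leftarrow \mathrm{bow}_{ij}\cdot\mathrm{idf}_j$$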

Example 15: make_bow

# Required import: from scipy.cluster import vq [as alias]
# Or: from scipy.cluster.vq import vq [as alias]
def make_bow(dataset, clusters, tfidf):
    print("Make bow vector for each frame")

    # Count total number of frames.
    n_frames = 0
    for video in dataset:
        n_frames += len(video["features"])

    # Init bow vectors for all frames.
    # np.float was removed in newer NumPy; plain float (float64) is equivalent.
    bow = np.zeros((n_frames, clusters.shape[0]), dtype=float)

    # Make bow vectors for all frames.
    frame_index = 0
    for video in dataset:
        for frame in video["features"]:
            visual_word_ids = vq(frame, clusters)[0]
            for word_id in visual_word_ids:
                bow[frame_index, word_id] += 1
            frame_index += 1

    # Check whether to use TF-IDF weighting.
    if tfidf:
        print("Applying TF-IDF weighting")
        freq = np.sum((bow > 0) * 1, axis = 0)
        idf = np.log((n_frames + 1) / (freq + 1))
        bow = bow * idf

    # Replace features in dataset with the bow vector we've computed.
    frame_index = 0
    for i in range(len(dataset)):
        features = []
        for frame in dataset[i]["features"]:
            features.append(bow[frame_index])
            frame_index += 1

        dataset[i]["features"] = features

        if (i + 1) % 50 == 0:
            print("Processed %d/%d videos" % (i + 1, len(dataset)))

    return dataset 
Author: vkhoi | Project: KTH-Action-Recognition | Lines of code: 43 | Source file: make_bow_vector.py


Note: The scipy.cluster.vq.vq method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors; please consult each project's License before distributing or using the code. Do not reproduce this article without permission.