

Python hmm.GaussianHMM Class Code Examples

This article collects typical usage examples of the Python class hmmlearn.hmm.GaussianHMM. If you are unsure what the GaussianHMM class does, how to use it, or what real-world code built on it looks like, the hand-picked class examples below should help.


A total of 15 GaussianHMM code examples are presented below, sorted by popularity by default.
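Before the examples, here is a minimal, self-contained sketch (not taken from any of the projects below; the synthetic data and parameter choices are illustrative only) of the GaussianHMM workflow that recurs throughout: stacking several observation sequences into one array, passing their lengths to fit(), decoding hidden states with predict(), and inspecting the learned parameters.

import numpy as np
from hmmlearn.hmm import GaussianHMM

# Two synthetic observation sequences, stacked into a single array;
# `lengths` tells hmmlearn where one sequence ends and the next begins.
rng = np.random.RandomState(42)
seq1 = rng.normal(loc=0.0, scale=1.0, size=(100, 2))
seq2 = rng.normal(loc=5.0, scale=1.0, size=(80, 2))
X = np.vstack([seq1, seq2])
lengths = [len(seq1), len(seq2)]

# Fit a 2-state Gaussian HMM with diagonal covariances.
model = GaussianHMM(n_components=2, covariance_type="diag", n_iter=100)
model.fit(X, lengths)

# Decode the most likely state sequence and score the data.
hidden_states = model.predict(X, lengths)
print("Transition matrix:\n", model.transmat_)
print("Start probabilities:", model.startprob_)
print("State means:\n", model.means_)
print("Log-likelihood:", model.score(X, lengths))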

Example 1: mainHMM

def mainHMM(filePrefix):
    X_train, length_train, X_test, length_test = loadOneRoute(filePrefix)
    # Run Gaussian HMM
    print "fitting to HMM and decoding ..."
    model = GaussianHMM(n_components=4, covariance_type="diag", n_iter=2000).fit(X_train[:, 0:5], length_train)
    hidden_states = model.predict(X_test[:, 0:5], length_test)
    print "done"

    print hidden_states[0:20]
    print hidden_states[20:40]
    print hidden_states[40:60]
    print hidden_states[60:80]

    # Print trained parameters and plot
    print("Transition matrix")
    print(model.transmat_)
    print("Start Prob")
    print(model.startprob_)

    print("Means and vars of each hidden state")
    for i in range(model.n_components):
        print("{0}th hidden state".format(i))
        print("mean = ", model.means_[i])
        print("var = ", np.diag(model.covars_[i]))


    print(np.array(hidden_states).reshape((sum(length_test), 1)))
Developer: windy-lf, Project: AirTicketPredicting, Lines: 27, Source: HMM_test.py

Example 2: fit_batch

def fit_batch(traj_data, n_components=2, subsample_factor=1,
              features=['speed', 'rotation'], **kwargs):
    '''
    Fits model to concatenated traj_data
    Args:
        traj_data - list of paths of training dataset (trajectory csv)
        n_components - number of hidden states
        subsample_factor - subsample factor to apply to all files
        features - columns to fit model to
        **kwargs passed to GaussianHMM
    Returns:
        model - fitted model
    '''
    # Concatenate data
    feature_list = []
    lengths_list = []
    for path in traj_data:
        X, l = features_from_csv(path, features=features,
                                 subsample_factor=subsample_factor)
        feature_list.append(X)
        lengths_list.append(l)
    print 'Concatenating features...'
    X = np.vstack(feature_list)
    l = np.hstack(lengths_list)

    # Fit HMM
    print 'Fitting model...'
    model = GaussianHMM(n_components, **kwargs)
    model.fit(X, lengths=l)

    return model
Developer: J-Wall, Project: bee-tracking, Lines: 31, Source: traj_hmm.py

Example 3: fit

	def fit(self):

		if self.verbose:
			print "[Clustering] Clearing old model and segmentation"
		
		self.segmentation = []
		self.model = []


		new_segments = []
		new_model = []

		g = GaussianHMM(n_components=self.n_components)

		all_demos = self._demonstrations[0]
		lens = [np.shape(self._demonstrations[0])[0]]
		for i in range(1, len(self._demonstrations)):
			all_demos = np.concatenate([all_demos,self._demonstrations[i]])
			lens.append(np.shape(self._demonstrations[i])[0])

		g.fit(all_demos,lens) 
			
		for d in self._demonstrations:
			new_segments.append(self.findTransitions(g.predict(d)))
			#print g.predict(d)
			new_model.append(g)

		self.segmentation = new_segments
		self.model = new_model
Developer: BerkeleyAutomation, Project: tsc, Lines: 29, Source: clustering.py

Example 4: addModel

 def addModel(self, nom, data, nbEtats, n_iter, startprob_prior=None, transmat_prior=None):
     '''
     Adds a model to tabModels.

     Parameters:
     nom = name of the model
     data = three-dimensional array representing a cluster of movements, each movement itself containing positions
     nbEtats = number of hidden states for each model
     n_iter = number of iterations of the Baum-Welch algorithm
     startprob_prior = prior on the initial state distribution
     transmat_prior = prior on the state transition matrix
     '''
     model = GaussianHMM(nbEtats, covariance_type="diag", n_iter=n_iter, startprob_prior=startprob_prior, transmat_prior=transmat_prior)      
     model.fit(data)
     verif_set_transMat(model)
     taille = len(self.tabModels)
     if(taille == 0):
         self.tabModels.append([nom])
         self.tabModels[0].append(model)
         return
     for i in range(taille):        
         if(self.tabModels[i][0] == nom):
             self.tabModels[i].append(model)
             return
     self.tabModels.append([nom])
     self.tabModels[-1].append(model)
Developer: MaxenceQueyrel, Project: kinectApp, Lines: 26, Source: classSkeleton.py

Example 5: select

    def select(self):
        """ select the best model for self.this_word based on
        BIC score for n between self.min_n_components and self.max_n_components

        :return: GaussianHMM object
        """
        warnings.filterwarnings("ignore", category=DeprecationWarning)

        # TODO implement model selection based on BIC scores
        # raise NotImplementedError
        record = float("inf")

        min_seq = min([len(seq) for seq in self.sequences])    
        self.max_n_components = min (self.max_n_components, min_seq) 

        hmm_model = self.base_model(self.n_constant)
        for num in range(self.min_n_components,self.max_n_components+1,1):
            #print(num)
            try: 
                model = GaussianHMM(n_components= num, n_iter=1000).fit(self.X, self.lengths)
                logL = model.score(self.X, self.lengths)
                # p is the number of free parameters, N is the number of data points
                p = num*num + 2* num* len(self.X[0]) -1
                BIC = -2* logL + p * np.log(len(self.X))
                if BIC < record:
                    record = BIC
                    hmm_model = model  
            except:
                continue
                # print("failure on {} with {} states".format(self.this_word, num))         
        return hmm_model
Developer: jychstar, Project: udacityClass, Lines: 31, Source: my_model_selectors.py

Example 6: fit_HMM

 def fit_HMM(self,error_metric):
     print "Looking for optimal number of states and fitting HMM"
     for i in xrange(2,5):
         candidate = GaussianHMM(n_components=i, covariance_type="full", n_iter=1000)
         candidate.fit(self.X_train)
         if error_metric == HMM_MAD:
             error = HMM_MAD(candidate,self.X_test)
             if i == 2:
                 best_guess = error
                 best_model = candidate
                 opt_n_states = i
             else:
                 if error < best_guess:
                     opt_n_states = i
                     best_model = candidate
                     best_guess = error
         else:
             error = error_metric(candidate,self.X_test)
             if i == 2:
                 best_guess = error
                 best_model = candidate
                 opt_n_states = i
             else:
                 if error > best_guess:
                     opt_n_states = i
                     best_model = candidate
                     best_guess = error
     self.model = best_model
     self.n_states = opt_n_states
     print "Done. Lowest error of {} achieved with {} states".format(best_guess, opt_n_states)
Developer: pipette, Project: Electricity-load-disaggregation, Lines: 30, Source: HMM.py

Example 7: select

    def select(self):
        """ select the best model for self.this_word based on
        BIC score for n between self.min_n_components and self.max_n_components

        :return: GaussianHMM object
        """
        warnings.filterwarnings("ignore", category=DeprecationWarning)

        best_score = float('inf')
        best_model = None
        for n in range(self.min_n_components, self.max_n_components+1):
            try:
                model = GaussianHMM(n_components=n, covariance_type="diag", n_iter=1000,
                                        random_state=self.random_state, verbose=False).fit(self.X, self.lengths)
                logL = model.score(self.X, self.lengths)
                # equation from udacity forum: https://discussions.udacity.com/t/how-to-start-coding-the-selectors/476905/10
                p = n ** 2 + 2 * n * len(self.X[0])  - 1
                N = len(self.X)
                score = -2 * logL + p * np.log(N)
                if score < best_score:
                    best_score = score
                    best_model = model
            except:
                continue
        return best_model
Developer: z-lu2017, Project: AIND-Recognizer, Lines: 25, Source: my_model_selectors.py
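Examples 5 and 7 both rank candidate models with the Bayesian Information Criterion. For reference, the quantity they compute corresponds to the standard formula below (the notation is mine, not from the original projects), where n is the number of hidden states, d the number of features, \hat{L} the model likelihood, and N the number of observations; a lower BIC is preferred.

$$
\mathrm{BIC} = -2\ln\hat{L} + p\ln N,
\qquad
p = \underbrace{n(n-1)}_{\text{transitions}} + \underbrace{(n-1)}_{\text{start probs}} + \underbrace{nd}_{\text{means}} + \underbrace{nd}_{\text{diag. covariances}} = n^{2} + 2nd - 1 .
$$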

Example 8: main

def main(args):
    x, X = loadDiffRows(args.diffFile)
    model = GaussianHMM(n_components=3,
                        covariance_type="diag",
                        n_iter=100000000000)
    model.transmat_ = numpy.array([[0.5, 0.5, 0.0],
                                   [0.0, 0.5, 0.5],
                                   [0.0, 0.0, 1.0]])
    model.fit(X)
    print(model.transmat_)
    model.transmat_[0][2] = 0.
    model.transmat_[1][0] = 0.
    model.transmat_[2][0] = 0.
    model.transmat_[2][1] = 0.
    
    exp = args.outFile.split('/')[-1].split('_')[0]
    with open(args.outFile, 'w') as fout:
        print('exp\tbin\treads\tstate', file=fout)
        for seq in X:
            hiddenStates = model.predict(seq)
            for idx,v in enumerate(zip(x,hiddenStates)):
                r,h = v
                print(exp + '\t' + str(idx) + '\t'
                      + str(r) + '\t' + str(h),
                      file=fout)
Developer: samesense, Project: pol2_states, Lines: 25, Source: hmm.py
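A note on Example 8: the transition matrix assigned before fit() is re-estimated during training, which is why the code zeroes the forbidden transitions again afterwards. If the goal is to keep the left-to-right structure fixed throughout training, one option hmmlearn offers is to drop 't' from params and init_params, so the hand-set matrix is neither re-initialized nor updated by EM. A minimal sketch, assuming the same three-state setup and training data X as above:

model = GaussianHMM(n_components=3, covariance_type="diag", n_iter=100,
                    params="smc", init_params="smc")
# Upper-triangular left-to-right transition matrix, set once and kept fixed.
model.transmat_ = numpy.array([[0.5, 0.5, 0.0],
                               [0.0, 0.5, 0.5],
                               [0.0, 0.0, 1.0]])
model.fit(X)  # start probabilities, means and covariances are still learned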

Example 9: fit_hmm

def fit_hmm(df, n_components, features=['speed', 'rotation'],
            **kwargs):
    '''
    Fits a Gaussian HMM to the velocity data
    Args:
        df - dataframe containing positional data to be processed
        n_components - number of hidden states
        features - features to use in model fitting
        **kwargs passed to GaussianHMM
    Returns:
        model
    '''
    X, lengths = get_features(df, features=features)
    model = GaussianHMM(n_components, **kwargs)
    model.fit(X, lengths=lengths)

    return model
Developer: J-Wall, Project: bee-tracking, Lines: 17, Source: traj_hmm.py

Example 10: setup

        def setup():

            def load_patterns(file):
                patterns = None
                sizes = np.zeros(len(words))
                counter = 0

                f = open(file, 'rb')
                data = f.readlines()

                stack = []
                for i in range(np.shape(data)[0]):
                    data2 = map(float, data[i].split())
                    data2 = np.reshape(data2, (1, -1))
                    if i == 0:
                        stack = data2
                    else:
                        stack = np.vstack((stack, data2))

                f.close()
                sizes[counter] = np.shape(stack)[0]
                counter += 1

                if patterns is None:
                    patterns = stack
                else:
                    patterns = np.vstack((patterns, stack))

                return patterns

            hidden = 1

            self.go_model = GaussianHMM(n_components=hidden, covariance_type="diag", n_iter=10000).fit(
                load_patterns('go.bin'))

            self.back_model = GaussianHMM(n_components=hidden, covariance_type="diag", n_iter=10000).fit(
                load_patterns('back.bin'))

            self.right_model = GaussianHMM(n_components=hidden, covariance_type="diag", n_iter=10000).fit(
                load_patterns('right.bin'))

            self.left_model = GaussianHMM(n_components=hidden, covariance_type="diag", n_iter=10000).fit(
                load_patterns('left.bin'))

            self.stop_model = GaussianHMM(n_components=hidden, covariance_type="diag", n_iter=10000).fit(
                load_patterns('stop.bin'))
Developer: psilva-leo, Project: SpeechRecognition, Lines: 46, Source: hmm.py
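The setup() above only trains the five word models; this snippet does not show how they are used afterwards. A hypothetical classification step (the function name, dict layout, and feature shape are assumptions, not part of the original project) would score an incoming feature matrix against each model and keep the best:

def classify(pattern, models):
    """Return the word whose HMM assigns `pattern` the highest log-likelihood.

    `models` is assumed to be a dict such as
    {'go': go_model, 'back': back_model, 'right': right_model,
     'left': left_model, 'stop': stop_model}, and `pattern` a 2-D feature
    array with one row per frame, matching the training data layout above.
    """
    return max(models, key=lambda word: models[word].score(pattern))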

Example 11: select

    def select(self):
        warnings.filterwarnings("ignore", category=DeprecationWarning)

        max_score = None
        max_model = None

        for n in range(self.min_n_components, self.max_n_components + 1):
            try:
                all_score = 0.0
                qty = 0
                final_model = None
                if (len(self.sequences) >= 2):
                    # Generate K folds
                    folds = min(len(self.sequences),3)
                    split_method = KFold(shuffle=True, n_splits=folds)
                    parts = split_method.split(self.sequences)
                    for cv_train_idx, cv_test_idx in parts:
                        # Kfold information for train
                        X_train, lengths_train = np.asarray(combine_sequences(cv_train_idx, self.sequences))
                        # Kfold information for test
                        X_test, lengths_test = np.asarray(combine_sequences(cv_test_idx, self.sequences))
                        # Fit model with train data
                        model = GaussianHMM(n_components=n, covariance_type="diag", n_iter=1000,
                                        random_state=self.random_state, verbose=False).fit(X_train, lengths_train)
                        # Get score using test data
                        all_score = all_score+model.score(X_test,lengths_test)
                        qty = qty+1
                    # Calculate score
                    score = all_score / qty
                else:
                    # cant be fold
                    final_model = GaussianHMM(n_components=n, covariance_type="diag", n_iter=1000,
                                        random_state=self.random_state, verbose=False).fit(self.X, self.lengths)
                    score = final_model.score(self.X, self.lengths)
                # Keep model with best score
                if max_score is None or max_score < score:
                    max_score = score
                    if final_model is None:
                        final_model = GaussianHMM(n_components=n, covariance_type="diag", n_iter=1000,
                                                  random_state=self.random_state, verbose=False).fit(self.X, self.lengths)
                    max_model = final_model

            except:
                pass

        return max_model
Developer: harishmalan, Project: AI, Lines: 46, Source: my_model_selectors.py

Example 12: train

    def train(self, data, n_components):
        print("Training Data: %s" % data)
        self.data = data
        self.model = GaussianHMM(n_components, algorithm='viterbi', covariance_type='diag')
        X = np.reshape(data, (len(data),1))
        self.model = self.model.fit([X])

        self.hidden_states = self.model.predict(X)
        print("Sequence of States: " % self.hidden_states)
Developer: mkdmkk, Project: infaas, Lines: 9, Source: hmm.py

Example 13: HmmClassifier

class HmmClassifier():
    def __init__(self, referenceSeqs, inputSeq):
        self.referenceSeqs = referenceSeqs
        self.inputSeq = inputSeq

        # feel free to change this model
        self.model = GaussianHMM(n_components=2, covariance_type="full", n_iter=2000)

    def predict(self):
        probs = []
        for referenceSeq in self.referenceSeqs:
            #print "reference: {}".format(referenceSeq)
            self.model.fit(referenceSeq)
            hidden_states = self.model.predict(referenceSeq)
            prob = self.model.score(self.inputSeq)
            probs.append(prob)

        # return the index of the max prob
        return probs.index(max(probs))
Developer: lujunzju, Project: AirTicketPredicting, Lines: 19, Source: HmmClassifier.py

Example 14: __init__

class HMM:
    __slots__ = [
        "model"
    ]

    def __init__(self):
        pass


    def draw(self, data):
        figure()
        plot(range(len(data)),data,alpha=0.8,color='red')
        show()


    def train(self, data, n_components):
        print("Training Data: %s" % data)
        self.data = data
        self.model = GaussianHMM(n_components, algorithm='viterbi', covariance_type='diag')
        X = np.reshape(data, (len(data),1))
        self.model = self.model.fit([X])

        self.hidden_states = self.model.predict(X)
        print("Sequence of States: " % self.hidden_states)


    def eval(self, obs):
        print("Testing Data: %s" % obs)
        X = np.reshape(obs, (len(obs),1))
        print("Eval: %s" % str(self.model.score(X)))


    def plot(self):
        fig = figure(facecolor="white")
        ax = fig.add_subplot(111)

        for i in range(self.model.n_components):
            # use fancy indexing to plot data in each state
            idx = (self.hidden_states == i)
            ax.plot(np.array(range(len(self.data)))[idx], np.array(self.data)[idx], '.', label="State %d" % (i+1))

        ax.legend()
        show()
Developer: mkdmkk, Project: infaas, Lines: 43, Source: hmm.py

Example 15: test_backward_with_hmmlearn

    def test_backward_with_hmmlearn(self):
        r = np.random.randn
        obs = [np.array([[-600 + r(), 100 + r()], [-300 + r(), 200 + r()], [0 + r(), 300 + r()]]) for _ in xrange(10)]
        hmm = GaussianHMM(n_components=3)
        hmm.fit(obs)

        # Calculate bwdlattice using the hmmlearn algorithm
        framelogprob = hmm._compute_log_likelihood(obs[0])
        start = timeit.default_timer()
        bwdlattice1 = hmm._do_backward_pass(framelogprob)
        print('hmmlearn took %fs' % (timeit.default_timer() - start))

        # Calculate bwdlattice using fhmm algorithm with #chains = 1. This should yield the exact same results
        start = timeit.default_timer()
        bwdlattice2 = np.zeros(bwdlattice1.shape)
        fhmmc._backward(obs[0].shape[0], 1, hmm.n_components, [(x,) for x in xrange(hmm.n_components)],
                        hmm._log_startprob.reshape(1, 3), hmm._log_transmat.reshape(1, 3, 3), framelogprob, bwdlattice2)
        print('fhmm took %fs' % (timeit.default_timer() - start))
        self.assertTrue(np.allclose(bwdlattice1, bwdlattice2))
Developer: caomw, Project: motion-classification, Lines: 19, Source: test_fhmm.py


Note: The hmmlearn.hmm.GaussianHMM class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and copyright of the source code remains with the original authors. For redistribution and use, please refer to the license of the corresponding project; do not republish without permission.