

Python text.progprint_xrange function code examples

This article collects typical usage examples of the Python function pybasicbayes.util.text.progprint_xrange. If you have been wondering what progprint_xrange does, how to call it, or what real-world uses look like, the curated code examples below should help.


The sections below show 15 code examples of the progprint_xrange function, sorted by popularity by default.
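
For orientation, here is a minimal sketch of the pattern all 15 examples share: progprint_xrange is used as a drop-in replacement for range/xrange that prints progress markers as iterations complete. The loop body below is only a placeholder for an expensive step such as model.resample_model(); the perline keyword (markers per printed console line) is taken from the examples below, while the toy function name and loop body are illustrative assumptions, not part of pybasicbayes.

from pybasicbayes.util.text import progprint_xrange

def toy_sampler_loop(N_samples=100):
    # progprint_xrange iterates like range/xrange while printing a progress
    # marker for each completed iteration; perline controls how many markers
    # are printed per line of console output.
    samples = []
    for itr in progprint_xrange(N_samples, perline=25):
        samples.append(itr ** 2)   # placeholder for e.g. model.resample_model()
    return samples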

Example 1: svi_example

def svi_example(true_model, X, Z_true, mask):
    # Fit a test model
    model = FactorAnalysis(
        D_obs, D_latent,
        # W=true_model.W, sigmasq=true_model.sigmasq
        )

    # Add the data in minibatches
    N = X.shape[0]
    minibatchsize = 200
    prob = minibatchsize / float(N)

    lps = []
    angles = []
    N_iters = 100
    delay = 10.0
    forgetting_rate = 0.75
    stepsize = (np.arange(N_iters) + delay)**(-forgetting_rate)
    for itr in progprint_xrange(N_iters):
        minibatch = np.random.permutation(N)[:minibatchsize]
        X_mb, mask_mb = X[minibatch], mask[minibatch]
        lps.append(model.meanfield_sgdstep(X_mb, prob, stepsize[itr], masks=mask_mb))
        E_W, _, _, _ = model.regression.mf_expectations
        angles.append(principal_angle(true_model.W, E_W))

    # Compute the expected states for the first minibatch of data
    model.add_data(X, mask)
    statesobj = model.data_list.pop()
    statesobj.meanfieldupdate()
    Z_inf = statesobj.E_Z
    plot_results(lps, angles, Z_true, Z_inf)
Developer: mattjj, Project: pybasicbayes, Lines of code: 31, Source file: factor_analysis.py

Example 2: fit

    def fit(train_data, test_data, T, Niter, init_at_em, *args):
        resample = operator.methodcaller(method)

        def evaluate(model):
            ll, pll, perp = \
                model.log_likelihood(), model.log_likelihood(test_data), \
                model.perplexity(test_data)
            return ll, pll, perp

        def sample(model):
            tic = time.time()
            resample(model)
            timestep = time.time() - tic
            return evaluate(model), timestep

        print('Running %s...' % name)
        model = cls(train_data, T, *args)
        model = initializer(model) if init_at_em and initializer else model
        init_val = evaluate(model)
        vals, timesteps = zip(*[sample(model) for _ in progprint_xrange(Niter)])

        lls, plls, perps = zip(*((init_val,) + vals))
        timestamps = np.cumsum((0.,) + timesteps)

        return Results(lls, plls, perps, model.copy_sample(), timestamps)
Developer: fivejjs, Project: pgmult, Lines of code: 25, Source file: ctm.py

Example 3: fit

def fit(name, model, test_data, N_iter=1000, init_state_seq=None):
    def evaluate(model):
        ll = model.log_likelihood()
        pll = model.log_likelihood(test_data)
        N_used = len(model.used_states)
        trans = model.trans_distn
        alpha = trans.alpha
        gamma = trans.gamma if hasattr(trans, "gamma") else None
        rates = model.rates.copy()
        obs_hypers = model.obs_hypers
        # print 'N_states: {}, \tPLL:{}\n'.format(len(model.used_states), pll),
        return ll, pll, N_used, alpha, gamma, rates, obs_hypers

    def sample(model):
        tic = time.time()
        model.resample_model()
        timestep = time.time() - tic
        return evaluate(model), timestep

    # Initialize with given state seq
    if init_state_seq is not None:
        model.states_list[0].stateseq = init_state_seq
        for _ in xrange(100):
            model.resample_obs_distns()

    init_val = evaluate(model)
    vals, timesteps = zip(*[sample(model) for _ in progprint_xrange(N_iter)])

    lls, plls, N_used, alphas, gammas, rates, obs_hypers = \
        zip(*((init_val,) + vals))
    timestamps = np.cumsum((0.,) + timesteps)

    return Results(name, lls, plls, N_used, alphas, gammas,
                   rates, obs_hypers,
                   model.copy_sample(), timestamps)
Developer: DyutiB, Project: pyhsmm_spiketrains, Lines of code: 35, Source file: fit_hipp_data.py

Example 4: fit_hmm

def fit_hmm(Xs, Xtest, N_samples=100):
    model = MultinomialHMM(K, D)

    for X in Xs:
        model.add_data(X)

    samples = []
    lls = []
    test_lls = []
    pis = []
    zs = []
    timestamps = [time.time()]
    for smpl in progprint_xrange(N_samples):
        model.resample_model()
        timestamps.append(time.time())

        samples.append(model.copy_sample())
        # TODO: Use log_likelihood() to marginalize over z
        lls.append(model.log_likelihood())
        # lls.append(model.log_likelihood_fixed_z())
        test_lls.append(model.log_likelihood(Xtest))
        # pis.append(testmodel.pis()[0])
        zs.append(model.stateseqs[0])

    lls = np.array(lls)
    test_lls = np.array(test_lls)
    pis = np.array(pis)
    zs = np.array(zs)
    timestamps = np.array(timestamps)
    timestamps -= timestamps[0]

    return model, lls, test_lls, pis, zs, timestamps
Developer: ariddell, Project: pgmult, Lines of code: 32, Source file: multinomial_lds_2.py

Example 5: fit_hmm

def fit_hmm(Xs, Xtest, D_hmm, N_samples=100):
    print("Fitting HMM with %d states" % D_hmm)
    model = MultinomialHMM(K, D_hmm, alpha_0=10.0)

    for X in Xs:
        model.add_data(X)

    compute_pred_ll = lambda: sum([model.log_likelihood(np.vstack((Xs[i], Xtest[i])))
                                   - model.log_likelihood(Xs[i])
                                   for i,Xt in enumerate(Xtest)])

    init_results = (0, None, model.log_likelihood(),
                    model.log_likelihood(Xtest),
                    compute_pred_ll())

    def resample():
        tic = time.time()
        model.resample_model()
        toc = time.time() - tic

        return toc, None, model.log_likelihood(), \
            np.nan, \
            compute_pred_ll()

    times, samples, lls, test_lls, pred_lls = \
        map(np.array, zip(*([init_results] +
            [resample() for _ in progprint_xrange(N_samples, perline=5)])))
    timestamps = np.cumsum(times)

    return Results(lls, test_lls, pred_lls, samples, timestamps)
Developer: fivejjs, Project: pgmult, Lines of code: 30, Source file: ap_lds.py

Example 6: train_model

def train_model(model, train_data, test_data, N_samples=300, method='resample_model', thetas=None):
    print('Training %s with %s' % (model.__class__.__name__, method))
    model.add_data(train_data)

    # Initialize to a given set of thetas
    if thetas is not None:
        model.thetas = thetas
        for d in model.documents:
            d.resample_z()

    init_like, init_perp, init_sample, init_time = \
        model.log_likelihood(), model.perplexity(test_data), \
        model.copy_sample(), time.time()

    def update(i):
        operator.methodcaller(method)(model)
        # print "ll: ", model.log_likelihood()
        return model.log_likelihood(), \
               model.perplexity(test_data), \
               model.copy_sample(), \
               time.time()

    likes, perps, samples, timestamps = zip(*[update(i) for i in progprint_xrange(N_samples,perline=5)])

    # Get relative timestamps
    timestamps = np.array((init_time,) + timestamps)
    timestamps -= timestamps[0]

    return Results((init_like,) + likes,
                   (init_perp,) + perps,
                   (init_sample,) + samples,
                   timestamps)
Developer: ariddell, Project: pgmult, Lines of code: 32, Source file: ctm.py

Example 7: fit_hmm

def fit_hmm(Xs, Xtest, D_hmm, N_samples=100):
    print("Fitting HMM with %d states" % D_hmm)
    model = MultinomialHMM(K, D_hmm)

    for X in Xs:
        model.add_data(X)

    init_results = (0, None, model.log_likelihood(),
                    model.log_likelihood(Xtest),
                    (model.log_likelihood(np.vstack((Xs[0], Xtest))) - model.log_likelihood(Xs[0])))

    def resample():
        tic = time.time()
        model.resample_model()
        toc = time.time() - tic

        return toc, None, model.log_likelihood(), \
            model.log_likelihood(Xtest), \
            (model.log_likelihood(np.vstack((Xs[0], Xtest))) - model.log_likelihood(Xs[0]))

    times, samples, lls, test_lls, pred_lls = \
        map(np.array, zip(*([init_results] + [resample() for _ in progprint_xrange(N_samples)])))
    timestamps = np.cumsum(times)

    return Results(lls, test_lls, pred_lls, samples, timestamps)
Developer: fivejjs, Project: pgmult, Lines of code: 25, Source file: dna_lds.py

Example 8: svi_example

def svi_example(true_model, true_data):
    X, mask = true_data.X, true_data.mask

    # Fit a test model
    model = FactorAnalysis(
        D_obs, D_latent,
        # W=true_model.W, sigmasq=true_model.sigmasq
        )

    # Add the data in minibatches
    minibatchsize = 250
    for start in range(0, N, minibatchsize):
        end = min(start + minibatchsize, N)
        model.add_data(X[start:end], mask=mask[start:end])

    lps = []
    angles = []
    N_iters = 100
    delay = 10.0
    forgetting_rate = 0.75
    stepsize = (np.arange(N_iters) + delay)**(-forgetting_rate)
    for itr in progprint_xrange(N_iters):
        lps.append(model.meanfield_sgdstep(stepsize[itr]))
        E_W, _, _, _ = model.regression.mf_expectations
        angles.append(principal_angle(true_model.W, E_W))

    Z_inf = model.data_list[0].E_Z
    Z_true = true_data.Z[:Z_inf.shape[0]]
    plot_results(lps, angles, Z_true, Z_inf)
Developer: mathcg, Project: pybasicbayes, Lines of code: 29, Source file: factor_analysis.py

Example 9: fit_hmm

def fit_hmm(Xs, Xtest, D_hmm, N_samples=100):
    Nx = len(Xs)
    assert len(Xtest) == Nx

    print("Fitting HMM with %d states" % D_hmm)
    models = [MultinomialHMM(K, D_hmm, alpha_0=10.0) for _ in xrange(Nx)]

    for X, model in zip(Xs, models):
        model.add_data(X)

    def compute_pred_ll():
        pred_ll = 0
        for Xtr, Xte, model in zip(Xs, Xtest, models):
            pred_ll += model.log_likelihood(np.vstack((Xtr, Xte))) - model.log_likelihood(Xtr)
        return pred_ll

    init_results = (0, None, np.nan,  np.nan,  compute_pred_ll())

    def resample():
        tic = time.time()
        [model.resample_model() for model in models]
        toc = time.time() - tic

        return toc, None, np.nan, np.nan, compute_pred_ll()

    times, samples, lls, test_lls, pred_lls = \
        map(np.array, zip(*([init_results] +
            [resample() for _ in progprint_xrange(N_samples, perline=5)])))
    timestamps = np.cumsum(times)

    return Results(lls, test_lls, pred_lls, samples, timestamps)
Developer: fivejjs, Project: pgmult, Lines of code: 31, Source file: ap_lds_indiv.py

Example 10: fit_lds_model

def fit_lds_model(Xs, Xtest, D, N_samples=100):
    model = MultinomialLDS(K, D,
        init_dynamics_distn=GaussianFixed(mu=np.zeros(D), sigma=1*np.eye(D)),
        dynamics_distn=AutoRegression(nu_0=D+1,S_0=1*np.eye(D),M_0=np.zeros((D,D)),K_0=1*np.eye(D)),
        sigma_C=0.01
        )

    for X in Xs:
        model.add_data(X)

    model.resample_parameters()

    init_results = (0, model, model.log_likelihood(),
                    model.heldout_log_likelihood(Xtest, M=1),
                    model.predictive_log_likelihood(Xtest, M=1000))

    def resample():
        tic = time.time()
        model.resample_model()
        toc = time.time() - tic

        return toc, None, model.log_likelihood(), \
            model.heldout_log_likelihood(Xtest, M=1), \
            model.predictive_log_likelihood(Xtest, M=1000)

    times, samples, lls, test_lls, pred_lls = \
        map(np.array, zip(*([init_results] + [resample() for _ in progprint_xrange(N_samples)])))
    timestamps = np.cumsum(times)

    return Results(lls, test_lls, pred_lls, samples, timestamps)
Developer: fivejjs, Project: pgmult, Lines of code: 30, Source file: dna_lds.py

Example 11: ais

    def ais(self, N_samples=100, B=1000, steps_per_B=1,
            verbose=True, full_output=False, callback=None):
        """
        Since Gibbs sampling as a function of temperature is implemented,
        we can use AIS to approximate the marginal likelihood of the model.
        """
        # We use a linear schedule by default
        betas = np.linspace(0, 1, B)

        print "Estimating marginal likelihood with AIS"
        lw = np.zeros(N_samples)
        for m in progprint_xrange(N_samples):
            # Initialize the model with a draw from the prior
            self.initialize_from_prior()

            # Keep track of the log of the m-th weight
            # It starts at zero because the prior is assumed to be normalized
            lw[m] = 0.0

            # Sample the intermediate distributions
            for b in xrange(1,B):
                if verbose:
                    sys.stdout.write("M: %d\tBeta: %.3f \r" % (m,betas[b]))
                    sys.stdout.flush()

                # Compute the ratio of this sample under this distribution
                # and the previous distribution. The difference is added
                # to the log weight
                curr_lp = self.log_probability(temperature=betas[b])
                prev_lp = self.log_probability(temperature=betas[b-1])
                lw[m] += curr_lp - prev_lp

                # Sample the model at temperature betas[b]
                # Take some number of steps per beta in hopes that
                # the Markov chain will reach equilibrium.
                for s in range(steps_per_B):
                    self.collapsed_resample_model(temperature=betas[b])

                # Call the given callback
                if callback:
                    callback(self, m, b)

            if verbose:
                print ""
                print "W: %f" % lw[m]


        # Compute the mean of the weights to get an estimate of the normalization constant
        log_Z = -np.log(N_samples) + logsumexp(lw)

        # Use bootstrap to compute standard error
        subsamples = np.random.choice(lw, size=(100, N_samples), replace=True)
        log_Z_subsamples = logsumexp(subsamples, axis=1) - np.log(N_samples)
        std_log_Z = log_Z_subsamples.std()

        if full_output:
            return log_Z, std_log_Z, lw
        else:
            return log_Z, std_log_Z
Developer: sheqi, Project: pyglm, Lines of code: 59, Source file: models.py

Example 12: fit_gaussian_lds_model

def fit_gaussian_lds_model(Xs, Xtest, D_gauss_lds, N_samples=100):
    Nx = len(Xs)
    assert len(Xtest) == Nx

    print("Fitting Gaussian (Raw) LDS with %d states" % D_gauss_lds)
    from pylds.models import NonstationaryLDS
    models = [NonstationaryLDS(
                init_dynamics_distn=GaussianFixed(mu=np.zeros(D), sigma=1*np.eye(D)),
                dynamics_distn=AutoRegression(nu_0=D+1,S_0=1*np.eye(D),M_0=np.zeros((D,D)),K_0=1*np.eye(D)),
                emission_distn=Regression(nu_0=K+1,S_0=K*np.eye(K),M_0=np.zeros((K,D)),K_0=K*np.eye(D)))
              for _ in xrange(Nx)]

    Xs_centered = [X - np.mean(X, axis=0)[None,:] + 1e-3*np.random.randn(*X.shape) for X in Xs]
    for X, model in zip(Xs_centered, models):
        model.add_data(X)

    def compute_pred_ll():
        pred_ll = 0
        for Xtr, Xte, model in zip(Xs_centered, Xtest, models):
            # Monte Carlo sample to get pi density implied by Gaussian LDS
            Npred = 10
            Tpred = Xte.shape[0]
            preds = model.sample_predictions(Xtr, Tpred, Npred=Npred)

            # Convert predictions to a distribution by finding the
            # largest dimension for each predicted Gaussian.
            # Preds is T x K x Npred, inds is TxNpred
            inds = np.argmax(preds, axis=1)
            pi = np.array([np.bincount(inds[t], minlength=K) for t in xrange(Tpred)]) / float(Npred)
            assert np.allclose(pi.sum(axis=1), 1.0)

            pi = np.clip(pi, 1e-8, 1.0)
            pi /= pi.sum(axis=1)[:,None]

            # Compute the log likelihood under pi
            pred_ll += np.sum([Multinomial(weights=pi[t], K=K).log_likelihood(Xte[t][None,:])
                              for t in xrange(Tpred)])

        return pred_ll

    # TODO: Get initial pred ll
    init_results = (0, None, np.nan, np.nan, compute_pred_ll())

    def resample():
        tic = time.time()
        [model.resample_model() for model in models]
        toc = time.time() - tic


        return toc, None, np.nan, np.nan, compute_pred_ll()


    times, samples, lls, test_lls, pred_lls = \
        map(np.array, zip(*([init_results] +
            [resample() for _ in progprint_xrange(N_samples, perline=5)])))
    timestamps = np.cumsum(times)
    return Results(lls, test_lls, pred_lls, samples, timestamps)
Developer: fivejjs, Project: pgmult, Lines of code: 57, Source file: ap_lds_indiv.py

Example 13: fit_gaussian_lds_model

def fit_gaussian_lds_model(Xs, Xtest, D_gauss_lds, N_samples=100):
    print("Fitting Gaussian (Raw) LDS with %d states" % D_gauss_lds)
    model = DefaultLDS(n=D_gauss_lds, p=K)

    Xs_centered = [X - np.mean(X, axis=0)[None,:] + 1e-3*np.random.randn(*X.shape) for X in Xs]
    for X in Xs_centered:
        model.add_data(X)

    # TODO: Get initial pred ll
    init_results = (0, None, np.nan, np.nan, np.nan)


    def resample():
        tic = time.time()
        model.resample_model()
        toc = time.time() - tic

        # Monte Carlo sample to get pi density implied by Gaussian LDS
        Tpred = Xtest.shape[0]
        Npred = 1000

        preds = model.sample_predictions(Xs_centered[0], Tpred, Npred=Npred)

        # Convert predictions to a distribution by finding the
        # largest dimension for each predicted Gaussian.
        # Preds is T x K x Npred, inds is TxNpred
        inds = np.argmax(preds, axis=1)
        pi = np.array([np.bincount(inds[t], minlength=K) for t in xrange(Tpred)]) / float(Npred)
        assert np.allclose(pi.sum(axis=1), 1.0)

        pi = np.clip(pi, 1e-8, 1.0)
        pi /= pi.sum(axis=1)[:,None]

        # Compute the log likelihood under pi
        pred_ll = np.sum([Multinomial(weights=pi[t], K=K).log_likelihood(Xtest[t][None,:])
                          for t in xrange(Tpred)])

        return toc, None, np.nan, \
            np.nan, \
            pred_ll

    n_retries = 0
    max_attempts = 5
    while n_retries < max_attempts:
        try:
            times, samples, lls, test_lls, pred_lls = \
                map(np.array, zip(*([init_results] + [resample() for _ in progprint_xrange(N_samples)])))
            timestamps = np.cumsum(times)
            return Results(lls, test_lls, pred_lls, samples, timestamps)
        except Exception as e:
            print("Caught exception: ", e.message)
            print("Retrying")
            n_retries += 1

    raise Exception("Failed to fit the Raw Gaussian LDS model in %d attempts" % max_attempts)
Developer: fivejjs, Project: pgmult, Lines of code: 55, Source file: dna_lds.py

Example 14: meanfield_coordinate_descent

    def meanfield_coordinate_descent(self, tol=1e-1, maxiter=250, progprint=False, **kwargs):
        # NOTE: doesn't re-initialize!
        scores = []
        step_iterator = xrange(maxiter) if not progprint else progprint_xrange(maxiter)
        for itr in step_iterator:
            scores.append(self.meanfield_coordinate_descent_step(**kwargs))
            if scores[-1] is not None and len(scores) > 1:
                if np.abs(scores[-1] - scores[-2]) < tol:
                    return scores
        print('WARNING: meanfield_coordinate_descent hit maxiter of %d' % maxiter)
        return scores
Developer: josepablog, Project: pybasicbayes, Lines of code: 11, Source file: abstractions.py

Example 15: fit_discrete_time_model_gibbs

def fit_discrete_time_model_gibbs(S_dt, N_samples=100):

    # Now fit a DT model
    dt_model_test = pyhawkes.models.\
        DiscreteTimeNetworkHawkesModelSpikeAndSlab(K=K, dt=dt, dt_max=dt_max, B=B,
                                                   network_hypers=network_hypers)
    dt_model_test.add_data(S_dt)

    tic = time.time()
    for iter in progprint_xrange(N_samples, perline=25):
        dt_model_test.resample_model()
    toc = time.time()

    return (toc-tic) / N_samples
Developer: PerryZh, Project: pyhawkes, Lines of code: 14, Source file: discrete_continuous_comparison.py


Note: The pybasicbayes.util.text.progprint_xrange examples in this article are compiled from open-source code hosted on platforms such as GitHub. The snippets are drawn from open-source projects contributed by their respective authors, and copyright of the source code remains with those authors; consult each project's license before redistributing or reusing the code.