

Python numpy.infty Method Code Examples

This article compiles typical usage examples of the numpy.infty method in Python. If you are wondering what numpy.infty does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples from the numpy module that this method belongs to.


The following presents 15 code examples of the numpy.infty method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
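
Before the examples, here is a minimal sketch (not taken from any of the projects below, and assuming a NumPy version that still provides the infty alias) showing the two ways numpy.infty usually appears in the snippets that follow: as plain positive infinity, and as the initial value of a running best score or lower bound.

import numpy as np

# numpy.infty is just another name for IEEE-754 positive infinity.
print(np.infty)             # inf
print(np.infty == np.inf)   # True

# Common pattern in the examples below: start a running maximum at -infinity
# so that the first real score always replaces it.
scores = [0.3, 1.7, -2.5, 0.9]
best = -np.infty
for s in scores:
    if s > best:
        best = s
print(best)                 # 1.7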

Example 1: extend

# Required import: import numpy [as alias]
# Or: from numpy import infty [as alias]
def extend(self, token, log_prob, state, decoder_output, encoder_mask, attn_dist, p_gen, coverage):
    """Return a NEW hypothesis, extended with the information from the latest step of beam search.

    Args:
      token: Integer. Latest token produced by beam search.
      log_prob: Float. Log prob of the latest token.
      state: Current decoder state, a LSTMStateTuple.
      decoder_output: Decoder output from the latest step, or None if not collected.
      encoder_mask: Encoder mask from the latest step, or None if not collected.
      attn_dist: Attention distribution from latest step. Numpy array shape (attn_length).
      p_gen: Generation probability on latest step. Float.
      coverage: Latest coverage vector. Numpy array shape (attn_length), or None if not using coverage.
    Returns:
      New Hypothesis for next step.
    """
    if FLAGS.avoid_trigrams and self._has_trigram(self.tokens + [token]):
        log_prob = -np.infty
    return Hypothesis(tokens = self.tokens + [token],
                      log_probs = self.log_probs + [log_prob],
                      state = state,
                      decoder_output= self.decoder_output + [decoder_output] if decoder_output is not None else [],
                      encoder_mask = self.encoder_mask + [encoder_mask] if encoder_mask is not None else [],
                      attn_dists = self.attn_dists + [attn_dist],
                      p_gens = self.p_gens + [p_gen],
                      coverage = coverage) 
Developer: yaserkl | Project: TransferRL | Lines: 25 | Source: beam_search.py

Example 2: fit

# Required import: import numpy [as alias]
# Or: from numpy import infty [as alias]
def fit(self, X):
        """Fit GMM parameters to X

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)

        """
        # initialize
        self._initialize_parameters(X, self.random_state)
        lower_bound = -np.infty

        for n in range(self.n_iter):
            # E-step
            log_prob_norm, log_resp = self._e_step(X)

            # M-step
            self._m_step(X, log_resp)

            # check convergence
            back_lower_bound = lower_bound
            lower_bound = self._compute_lower_bound(
                log_resp, log_prob_norm) 
Developer: k2kobayashi | Project: sprocket | Lines: 25 | Source: diagGMM.py

Example 3: __init__

# Required import: import numpy [as alias]
# Or: from numpy import infty [as alias]
def __init__(self, reg_e=1., reg_cl=0.1,
                 max_iter=10, max_inner_iter=200, log=False,
                 tol=10e-9, verbose=False,
                 metric="sqeuclidean", norm=None,
                 distribution_estimation=distribution_estimation_uniform,
                 out_of_sample_map='ferradans', limit_max=np.infty):
        self.reg_e = reg_e
        self.reg_cl = reg_cl
        self.max_iter = max_iter
        self.max_inner_iter = max_inner_iter
        self.tol = tol
        self.log = log
        self.verbose = verbose
        self.metric = metric
        self.norm = norm
        self.distribution_estimation = distribution_estimation
        self.out_of_sample_map = out_of_sample_map
        self.limit_max = limit_max 
Developer: PythonOT | Project: POT | Lines: 20 | Source: da.py

Example 4: test_monotonic_likelihood

# Required import: import numpy [as alias]
# Or: from numpy import infty [as alias]
def test_monotonic_likelihood():
    # We check that each step of variational inference without regularization
    # monotonically improves the lower bound on the training set.
    rng = np.random.RandomState(0)
    rand_data = RandomData(rng, scale=20)
    n_components = rand_data.n_components

    for prior_type in PRIOR_TYPE:
        for covar_type in COVARIANCE_TYPE:
            X = rand_data.X[covar_type]
            bgmm = BayesianGaussianMixture(
                weight_concentration_prior_type=prior_type,
                n_components=2 * n_components, covariance_type=covar_type,
                warm_start=True, max_iter=1, random_state=rng, tol=1e-4)
            current_lower_bound = -np.infty
            # Do one training iteration at a time so we can make sure that the
            # training log likelihood increases after each iteration.
            for _ in range(600):
                prev_lower_bound = current_lower_bound
                current_lower_bound = bgmm.fit(X).lower_bound_
                assert_greater_equal(current_lower_bound, prev_lower_bound)

                if bgmm.converged_:
                    break
            assert(bgmm.converged_) 
Developer: PacktPublishing | Project: Mastering-Elasticsearch-7.0 | Lines: 27 | Source: test_bayesian_mixture.py

Example 5: __init__

# Required import: import numpy [as alias]
# Or: from numpy import infty [as alias]
def __init__(self, data, max_mixture=10, threshold=0.1):
        """
        Class constructor, arguments include:
            data - data to build GMM model
            max_mixture - max number of Gaussian mixtures
            threshold - probability threshold used to determine the fence
        """
        self.data = data
        self.thresh = threshold
        lowest_bic = np.infty
        components = 1
        bic = []
        n_components_range = range(1, max_mixture + 1)
        for n_components in n_components_range:
            # Fit a Gaussian mixture with EM
            gmm = mixture.GaussianMixture(n_components=n_components,
                                          random_state=1005)
            gmm.fit(data)
            bic.append(gmm.bic(data))
            if bic[-1] < lowest_bic:
                lowest_bic = bic[-1]
                best_gmm = gmm
                components = n_components
        log.debug('best gmm components number: %d, bic %f ', components, lowest_bic)
        self.gmm = best_gmm 
Developer: intel | Project: platform-resource-manager | Lines: 27 | Source: gmmfense.py

Example 6: test_constants

# Required import: import numpy [as alias]
# Or: from numpy import infty [as alias]
def test_constants():
    assert chainerx.Inf is numpy.Inf
    assert chainerx.Infinity is numpy.Infinity
    assert chainerx.NAN is numpy.NAN
    assert chainerx.NINF is numpy.NINF
    assert chainerx.NZERO is numpy.NZERO
    assert chainerx.NaN is numpy.NaN
    assert chainerx.PINF is numpy.PINF
    assert chainerx.PZERO is numpy.PZERO
    assert chainerx.e is numpy.e
    assert chainerx.euler_gamma is numpy.euler_gamma
    assert chainerx.inf is numpy.inf
    assert chainerx.infty is numpy.infty
    assert chainerx.nan is numpy.nan
    assert chainerx.newaxis is numpy.newaxis
    assert chainerx.pi is numpy.pi 
Developer: chainer | Project: chainer | Lines: 18 | Source: test_constants.py
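
As the ChainerX test above suggests, numpy.infty is simply an alias of numpy.inf (numpy.Inf, numpy.Infinity and numpy.PINF refer to the same float object). A quick check, assuming NumPy 1.x; note that newer releases (NumPy 2.0+) drop the infty alias in favour of numpy.inf.

import numpy as np

# All of these names refer to the same Python float object.
assert np.infty is np.inf
assert np.Inf is np.inf and np.Infinity is np.inf and np.PINF is np.inf
print(float('inf') == np.infty)   # True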

Example 7: weiss_retinex

# Required import: import numpy [as alias]
# Or: from numpy import infty [as alias]
def weiss_retinex(image, multi_images, mask, threshold, L1=False):
    multi_images = np.clip(multi_images, 3., np.infty)
    log_multi_images = np.log(multi_images)

    i_y_all, i_x_all = poisson.get_gradients(log_multi_images)
    r_y = np.median(i_y_all, axis=2)
    r_x = np.median(i_x_all, axis=2)

    r_y *= (np.abs(r_y) > threshold)
    r_x *= (np.abs(r_x) > threshold)
    if L1:
        log_refl = poisson.solve_L1(r_y, r_x, mask)
    else:
        log_refl = poisson.solve(r_y, r_x, mask)
    refl = np.where(mask, np.exp(log_refl), 0.)
    shading = np.where(mask, image / refl, 0.)

    return shading, refl





#################### Wrapper classes for experiments ########################### 
Developer: seanbell | Project: opensurfaces | Lines: 26 | Source: intrinsic.py

Example 8: assert_mpa_identical

# Required import: import numpy [as alias]
# Or: from numpy import infty [as alias]
def assert_mpa_identical(mpa1, mpa2, decimal=np.infty):
    """Verify that two MPAs are complety identical
    """
    assert len(mpa1) == len(mpa2)
    assert mpa1.canonical_form == mpa2.canonical_form
    assert mpa1.dtype == mpa2.dtype

    for i, lten1, lten2 in zip(it.count(), mpa1.lt, mpa2.lt):
        if decimal is np.infty:
            assert_array_equal(lten1, lten2,
                               err_msg='mismatch in lten {}'.format(i))
        else:
            assert_array_almost_equal(lten1, lten2, decimal=decimal,
                                      err_msg='mismatch in lten {}'.format(i))
    # TODO: We should make a comprehensive comparison between `mpa1`
    # and `mpa2`.  Are we missing other things? 
Developer: dsuess | Project: mpnum | Lines: 18 | Source: _testing.py

Example 9: bsoid_hdbscan

# Required import: import numpy [as alias]
# Or: from numpy import infty [as alias]
def bsoid_hdbscan(umap_embeddings, hdbscan_params=HDBSCAN_PARAMS):
    """
    Trains HDBSCAN (unsupervised) given learned UMAP space
    :param umap_embeddings: 2D array, embedded UMAP space
    :param hdbscan_params: dict, HDBSCAN params in GLOBAL_CONFIG
    :return assignments: HDBSCAN assignments
    """
    highest_numulab = -np.infty
    numulab = []
    min_cluster_range = range(6, 21)
    logging.info('Running HDBSCAN on {} instances in {} D space...'.format(*umap_embeddings.shape))
    for min_c in min_cluster_range:
        trained_classifier = hdbscan.HDBSCAN(prediction_data=True,
                                             min_cluster_size=int(round(0.001 * min_c * umap_embeddings.shape[0])),
                                             **hdbscan_params).fit(umap_embeddings)
        numulab.append(len(np.unique(trained_classifier.labels_)))
        if numulab[-1] > highest_numulab:
            logging.info('Adjusting minimum cluster size to maximize cluster number...')
            highest_numulab = numulab[-1]
            best_clf = trained_classifier
    assignments = best_clf.labels_
    soft_clusters = hdbscan.all_points_membership_vectors(best_clf)
    soft_assignments = np.argmax(soft_clusters, axis=1)
    logging.info('Done predicting labels for {} instances in {} D space...'.format(*umap_embeddings.shape))
    return assignments, soft_clusters, soft_assignments 
Developer: YttriLab | Project: B-SOID | Lines: 27 | Source: train.py

Example 10: train_gmm

# Required import: import numpy [as alias]
# Or: from numpy import infty [as alias]
def train_gmm(X, max_iter, tol, means, covariances):
    xp = cupy.get_array_module(X)
    lower_bound = -np.infty
    converged = False
    weights = xp.array([0.5, 0.5], dtype=np.float32)
    inv_cov = 1 / xp.sqrt(covariances)

    for n_iter in range(max_iter):
        prev_lower_bound = lower_bound
        log_prob_norm, log_resp = e_step(X, inv_cov, means, weights)
        weights, means, covariances = m_step(X, xp.exp(log_resp))
        inv_cov = 1 / xp.sqrt(covariances)
        lower_bound = log_prob_norm
        change = lower_bound - prev_lower_bound
        if abs(change) < tol:
            converged = True
            break

    if not converged:
        print('Failed to converge. Increase max-iter or tol.')

    return inv_cov, means, weights, covariances 
Developer: cupy | Project: cupy | Lines: 24 | Source: gmm.py

Example 11: test_sweepequalmatrix

# Required import: import numpy [as alias]
# Or: from numpy import infty [as alias]
def test_sweepequalmatrix(self):
    for type in classes:
      self.swparams['collocation_class'] = type
      step, level, problem, nnodes = self.setupLevelStepProblem()
      step.levels[0].sweep.predict()
      u0full = np.array([ level.u[l].values.flatten() for l in range(1,nnodes+1) ])

      # Perform node-to-node SDC sweep
      level.sweep.update_nodes()

      lambdas = [ problem.params.lambda_f[0] , problem.params.lambda_s[0] ]
      LHS, RHS = level.sweep.get_scalar_problems_sweeper_mats( lambdas = lambdas )

      unew = np.linalg.inv(LHS).dot( u0full + RHS.dot(u0full) )
      usweep = np.array([ level.u[l].values.flatten() for l in range(1,nnodes+1) ])
      assert np.linalg.norm(unew - usweep, np.infty)<1e-14, "Single SDC sweeps in matrix and node-to-node formulation yield different results"

  #
  # Make sure the implemented update formula matches the matrix update formula
  # 
Developer: Parallel-in-Time | Project: pySDC | Lines: 22 | Source: test_imexsweeper.py

Example 12: test_updateformula

# Required import: import numpy [as alias]
# Or: from numpy import infty [as alias]
def test_updateformula(self):
    for type in classes:
      self.swparams['collocation_class'] = type
      step, level, problem, nnodes = self.setupLevelStepProblem()
      level.sweep.predict()
      u0full = np.array([ level.u[l].values.flatten() for l in range(1,nnodes+1) ])

      # Perform update step in sweeper
      level.sweep.update_nodes()
      ustages = np.array([ level.u[l].values.flatten() for l in range(1,nnodes+1) ])
      # Compute end value through provided function
      level.sweep.compute_end_point()
      uend_sweep = level.uend.values
      # Compute end value from matrix formulation
      if level.sweep.params.do_coll_update:
        uend_mat   = self.pparams['u0'] + step.dt*level.sweep.coll.weights.dot(ustages*(problem.params.lambda_s[0] + problem.params.lambda_f[0]))
      else:
        uend_mat = ustages[-1]
      assert np.linalg.norm(uend_sweep - uend_mat, np.infty)<1e-14, "Update formula in sweeper gives different result than matrix update formula"


  #
  # Compute the exact collocation solution by matrix inversion and make sure it is a fixed point
  # 
Developer: Parallel-in-Time | Project: pySDC | Lines: 26 | Source: test_imexsweeper.py

Example 13: test_updateformula_no_coll_update

# Required import: import numpy [as alias]
# Or: from numpy import infty [as alias]
def test_updateformula_no_coll_update(self):
    for type in classes:
      self.swparams['collocation_class'] = type
      self.swparams['do_coll_update'] = False
      step, level, problem, nnodes = self.setupLevelStepProblem()
      # if type of nodes does not have right endpoint as quadrature nodes, cannot set do_coll_update to False and perform this test
      if not level.sweep.coll.right_is_node: break
      level.sweep.predict()
      u0full = np.array([ level.u[l].values.flatten() for l in range(1,nnodes+1) ])

      # Perform update step in sweeper
      level.sweep.update_nodes()
      ustages = np.array([ level.u[l].values.flatten() for l in range(1,nnodes+1) ])

      # Compute end value through provided function
      level.sweep.compute_end_point()
      uend_sweep = level.uend.values
      # Compute end value from matrix formulation
      q = np.zeros(nnodes)
      q[nnodes-1] = 1.0
      uend_mat   = q.dot(ustages)
      assert np.linalg.norm(uend_sweep - uend_mat, np.infty)<1e-14, "For do_coll_update=False, update formula in sweeper gives different result than matrix update formula with q=(0,..,0,1)" 
Developer: Parallel-in-Time | Project: pySDC | Lines: 24 | Source: test_imexsweeper.py

Example 14: cal_Informativeness

# Required import: import numpy [as alias]
# Or: from numpy import infty [as alias]
def cal_Informativeness(self, label_index, unlabel_index, models):
        """

        Returns:
        Info : 2d array-like 
        shape [n_unlabel_samples, n_classes]
        Informativeness of each unlabel samples
        """
        Infor = np.zeros((self.n_samples, self.n_classes))
        Uncertainty = self.cal_uncertainty(label_index, unlabel_index, models)
        label_mat = label_index.get_matrix_mask((self.n_samples, self.n_classes), sparse=False)
        unlabel_mat = unlabel_index.get_matrix_mask((self.n_samples, self.n_classes), sparse=False)
        for j in np.arange(self.n_classes):
            j_unlabel = np.where(unlabel_mat[:, j] == 1)[0]
            j_label = np.where(unlabel_mat[:, j] != 1)[0]
            for i in j_unlabel:
                flag = self.cal_relevance(i, j, label_index, models, k=5)
                if flag == 1:
                    Infor[i][j] = Uncertainty[i][j] * 2
                elif flag == -1:
                    Infor[i][j] = Uncertainty[i][j] + self.cal_Udes(i, j, Uncertainty)
            Infor[j_label][j] = -np.infty
        return Infor 
Developer: NUAA-AL | Project: ALiPy | Lines: 25 | Source: cost_sensitive.py

Example 15: cal_uncertainty

# Required import: import numpy [as alias]
# Or: from numpy import infty [as alias]
def cal_uncertainty(self, target, models):
        """Calculate the uncertainty.
        target: unlabel_martix
        """
        Uncertainty = np.zeros([self.n_samples, self.n_classes])
        # unlabel_data = self.X[unlabel_index, :]
        for j in np.arange(self.n_classes):
            model = models[j]
            j_target = target[:, j]
            j_label = np.where(j_target != 1)
            j_unlabel = np.where(j_target == 1)
            for i in j_unlabel[0]:
                d_v = model.decision_function([self.X[i]])
                Uncertainty[i][j] = np.abs(1 / d_v)
            Uncertainty[j_label, j] = -np.infty
        return Uncertainty 
Developer: NUAA-AL | Project: ALiPy | Lines: 18 | Source: cost_sensitive.py


Note: The numpy.infty method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and copyright of the source code belongs to the original authors. For distribution and use, please refer to the license of the corresponding project; do not reproduce without permission.