

Python utils.check_random_state Method Code Examples

This article collects and summarizes typical code examples of the sklearn.utils.check_random_state method in Python. If you are wondering how exactly to use utils.check_random_state, how to call it, or what its usage looks like in practice, the curated code examples here may help. You can also explore further usage examples from the module it belongs to, sklearn.utils.


The sections below present 15 code examples of the utils.check_random_state method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
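
Before turning to the collected examples, here is a minimal illustrative sketch (written for this article, not taken from any of the projects below) of what check_random_state accepts and returns: None yields NumPy's global RandomState singleton, an integer yields a new RandomState seeded with that value, an existing numpy.random.RandomState instance is returned unchanged, and any other input raises ValueError.

import numpy as np
from sklearn.utils import check_random_state

# None -> NumPy's global RandomState singleton
rng_global = check_random_state(None)

# int -> a new RandomState seeded with that integer (reproducible draws)
rng_seeded = check_random_state(42)
print(rng_seeded.randint(0, 10, size=3))  # identical output on every run

# RandomState instance -> returned as-is, so callers share its state
existing = np.random.RandomState(0)
assert check_random_state(existing) is existing

# Anything else (e.g. a string) raises ValueError
try:
    check_random_state("not-a-seed")
except ValueError as err:
    print(err)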

Example 1: test_regression

# Required import: from sklearn import utils [as alias]
# Or: from sklearn.utils import check_random_state [as alias]
def test_regression():
    # Check regression for various parameter settings.
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
                                                        boston.target[:50],
                                                        random_state=rng)
    grid = ParameterGrid({"max_samples": [0.5, 1.0],
                          "max_features": [0.5, 1.0],
                          "bootstrap": [True, False],
                          "bootstrap_features": [True, False]})

    for base_estimator in [None,
                           DummyRegressor(),
                           DecisionTreeRegressor(),
                           KNeighborsRegressor(),
                           SVR(gamma='scale')]:
        for params in grid:
            BaggingRegressor(base_estimator=base_estimator,
                             random_state=rng,
                             **params).fit(X_train, y_train).predict(X_test) 
Developer: PacktPublishing, Project: Mastering-Elasticsearch-7.0, Lines: 22, Source: test_bagging.py

Example 2: __init__

# Required import: from sklearn import utils [as alias]
# Or: from sklearn.utils import check_random_state [as alias]
def __init__(self, roi, n_folds=3, mask_values=[0], shuffle=False,
                 random_state=None):
        self.roi = roi
        self.n_folds = n_folds
        if isinstance(mask_values, (float, int)):
            self.mask_values = np.array([mask_values])
        elif isinstance(mask_values, (list, tuple)):
            self.mask_values = np.array(mask_values)
        elif isinstance(mask_values, np.ndarray):
            self.mask_values = mask_values
        else:
            raise TypeError('mask_values must be float, int, list, tuple,'
                            ' or np.ndarray')
        if shuffle:
            self.shuffle = True
            self.rng = check_random_state(random_state)

        self._label_roi() 
Developer: ceholden, Project: yatsm, Lines: 20, Source: diagnostics.py

Example 3: sample_blobs

# Required import: from sklearn import utils [as alias]
# Or: from sklearn.utils import check_random_state [as alias]
def sample_blobs(n, ratio, rows=5, cols=5, sep=10, rs=None):
    rs = check_random_state(rs)
    # ratio is eigenvalue ratio
    correlation = (ratio - 1) / (ratio + 1)

    # generate within-blob variation
    mu = np.zeros(2)
    sigma = np.eye(2)
    X = rs.multivariate_normal(mu, sigma, size=n)

    corr_sigma = np.array([[1, correlation], [correlation, 1]])
    Y = rs.multivariate_normal(mu, corr_sigma, size=n)

    # assign to blobs
    X[:, 0] += rs.randint(rows, size=n) * sep
    X[:, 1] += rs.randint(cols, size=n) * sep
    Y[:, 0] += rs.randint(rows, size=n) * sep
    Y[:, 1] += rs.randint(cols, size=n) * sep

    return X, Y


################################################################################
### Sample images from GANs 
Developer: djsutherland, Project: opt-mmd, Lines: 26, Source: generate.py

Example 4: __init__

# Required import: from sklearn import utils [as alias]
# Or: from sklearn.utils import check_random_state [as alias]
def __init__(self, test_model=False, verify_model=True):
        model = Word2Vec.load(modelfile)

        if(test_model):
            acc = model.accuracy(questionfile)
            logger.info("Test model " + modelfile + " in " + questionfile)

        self.vector_size = model.vector_size
        self.vocab_size = len(model.wv.vocab) + 1
        self.word2index = self.GetWord2Index(model)
        self.index2word = self.GetIndex2Word(model)
        self.wordvector = self.GetWordVector(model)

        if(verify_model):
            logger.info("Verifing imported word2vec model")
            random_state = check_random_state(12)
            check_index = random_state.randint(low=0, high=self.vocab_size - 2, size=1000)
            for index in check_index:
                word_wv = model.wv.index2word[index]
                word_our = self.index2word[index+1]
                #print(index, word_wv, word_our)
                assert word_wv == word_our
                assert model.wv.vocab[word_our].index == self.word2index[word_our] - 1
                assert np.array_equal(model.wv[word_our], self.wordvector[self.word2index[word_our]])
            logger.info("Imported word2vec model is verified") 
Developer: sefira, Project: question-classification-cnn-rnn-attention, Lines: 27, Source: word2vec_helpers.py

Example 5: setUp

# Required import: from sklearn import utils [as alias]
# Or: from sklearn.utils import check_random_state [as alias]
def setUp(self):
        # Make an X that looks somewhat like a small tf-idf matrix.
        # XXX newer versions of SciPy >0.16 have scipy.sparse.rand for this.
        shape = 60, 55
        n_samples, n_features = shape
        rng = check_random_state(42)
        X = rng.randint(-100, 20, np.product(shape)).reshape(shape)
        X = sp.csr_matrix(np.maximum(X, 0), dtype=np.float64)
        X.data[:] = 1 + np.log(X.data)
        self.X = X
        self.Xdense = X.A
        self.n_samples = n_samples
        self.n_features = n_features

        self.session = new_session().as_default()
        self._old_executor = self.session._sess._executor
        self.executor = self.session._sess._executor = \
            ExecutorForTest('numpy', storage=self.session._sess._context) 
Developer: mars-project, Project: mars, Lines: 20, Source: test_truncated_svd.py

Example 6: __init__

# Required import: from sklearn import utils [as alias]
# Or: from sklearn.utils import check_random_state [as alias]
def __init__(self,
                 basis=LinearBasis(),
                 var=Parameter(gamma(1.), Positive()),
                 tol=1e-8,
                 maxiter=1000,
                 nstarts=100,
                 random_state=None
                 ):
        """See class docstring."""
        self.basis = basis
        self.var = var
        self.tol = tol
        self.maxiter = maxiter
        self.nstarts = nstarts
        self.random_state = random_state
        self.random_ = check_random_state(random_state) 
Developer: NICTA, Project: revrand, Lines: 18, Source: slm.py

Example 7: make_polynomial

# Required import: from sklearn import utils [as alias]
# Or: from sklearn.utils import check_random_state [as alias]
def make_polynomial(degree=3, n_samples=100, bias=0.0, noise=0.0,
                    return_coefs=False, random_state=None):
    """
    Generate a noisy polynomial for a regression problem

    Examples
    --------
    >>> X, y, coefs = make_polynomial(degree=3, n_samples=200, noise=.5,
    ...                               return_coefs=True, random_state=1)
    """
    generator = check_random_state(random_state)

    # TODO: Add arguments to support other priors
    coefs = generator.randn(degree + 1)
    pows = np.arange(degree + 1)
    poly = np.vectorize(lambda x: np.sum(coefs * x ** pows))
    X, y = make_regression(poly, n_samples=n_samples, bias=bias, noise=noise,
                           random_state=random_state)
    if return_coefs:
        return X, y, coefs

    return X, y 
Developer: NICTA, Project: revrand, Lines: 24, Source: datasets.py

Example 8: __init__

# Required import: from sklearn import utils [as alias]
# Or: from sklearn.utils import check_random_state [as alias]
def __init__(self,
                 likelihood=Gaussian(),
                 basis=LinearBasis(),
                 K=10,
                 maxiter=3000,
                 batch_size=10,
                 updater=None,
                 nsamples=50,
                 nstarts=500,
                 random_state=None
                 ):
        """See class docstring."""
        self.likelihood = likelihood
        self.basis = basis
        self.K = K
        self.maxiter = maxiter
        self.batch_size = batch_size
        self.updater = updater
        self.nsamples = nsamples
        self.nstarts = nstarts
        self.random_state = random_state  # For clone compatibility
        self.random_ = check_random_state(self.random_state) 
Developer: NICTA, Project: revrand, Lines: 24, Source: glm.py

Example 9: __init__

# Required import: from sklearn import utils [as alias]
# Or: from sklearn.utils import check_random_state [as alias]
def __init__(self,
                 nbases,
                 Xdim,
                 mean=Parameter(norm_dist(), Bound()),
                 lenscale=Parameter(gamma(1.), Positive()),
                 regularizer=None,
                 random_state=None
                 ):
        """See this class's docstring."""
        self.random_state = random_state  # for repr
        self._random = check_random_state(random_state)
        self._init_dims(nbases, Xdim)
        self._params = [self._init_param(mean),
                        self._init_param(lenscale)]
        self._init_matrices()
        super(_LengthScaleBasis, self).__init__(regularizer) 
Developer: NICTA, Project: revrand, Lines: 18, Source: basis_functions.py

Example 10: generate_experiment_data

# Required import: from sklearn import utils [as alias]
# Or: from sklearn.utils import check_random_state [as alias]
def generate_experiment_data(n=200, p=200, rho=0.6, random_state=3245):
    rng = check_random_state(random_state)

    sigma = np.eye(p)
    sigma[0, 2] = rho
    sigma[2, 0] = rho
    sigma[1, 2] = rho
    sigma[2, 1] = rho

    X = rng.multivariate_normal(mean=np.zeros(p), cov=sigma, size=(n,))
    beta = np.zeros(p)
    beta[:2] = 1.0
    epsilon = rng.normal(0.0, 0.25, size=(n,))

    y = np.matmul(X, beta) + epsilon

    return X, y 
Developer: scikit-learn-contrib, Project: stability-selection, Lines: 19, Source: test_randomized_lasso.py

Example 11: _check_params

# Required import: from sklearn import utils [as alias]
# Or: from sklearn.utils import check_random_state [as alias]
def _check_params(n_iter, dis_measure, random_state):
    """Internal function to check for and validate class parameters.
    Also, to return random state instance and the appropriate dissimilarity
    measure if valid.
    """
    if isinstance(n_iter, int):
        check_parameter(n_iter, low=1, param_name='n_iter')
    else:
        raise TypeError("n_iter should be int, got %s" % n_iter)

    if isinstance(dis_measure, str):
        if dis_measure not in ('aad', 'var', 'iqr'):
            raise ValueError("Unknown dissimilarity measure type, "
                             "dis_measure should be in "
                             "(\'aad\', \'var\', \'iqr\'), "
                             "got %s" % dis_measure)
        # TO-DO: 'mad': Median Absolute Deviation to be added
        # once Scipy stats version 1.3.0 is released
    else:
        raise TypeError("dis_measure should be str, got %s" % dis_measure)

    return check_random_state(random_state), _aad if dis_measure == 'aad' \
        else (np.var if dis_measure == 'var'
              else (stats.iqr if dis_measure == 'iqr' else None)) 
Developer: yzhao062, Project: pyod, Lines: 26, Source: lmdd.py

Example 12: test_graphical_lasso_cv

# Required import: from sklearn import utils [as alias]
# Or: from sklearn.utils import check_random_state [as alias]
def test_graphical_lasso_cv(random_state=1):
    # Sample data from a sparse multivariate normal
    dim = 5
    n_samples = 6
    random_state = check_random_state(random_state)
    prec = make_sparse_spd_matrix(dim, alpha=.96,
                                  random_state=random_state)
    cov = linalg.inv(prec)
    X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
    # Capture stdout, to smoke test the verbose mode
    orig_stdout = sys.stdout
    try:
        sys.stdout = StringIO()
        # We need verbose very high so that Parallel prints on stdout
        GraphicalLassoCV(verbose=100, alphas=5, tol=1e-1).fit(X)
    finally:
        sys.stdout = orig_stdout

    # Smoke test with specified alphas
    GraphicalLassoCV(alphas=[0.8, 0.5], tol=1e-1, n_jobs=1).fit(X) 
Developer: PacktPublishing, Project: Mastering-Elasticsearch-7.0, Lines: 22, Source: test_graphical_lasso.py

Example 13: test_graph_lasso_cv

# Required import: from sklearn import utils [as alias]
# Or: from sklearn.utils import check_random_state [as alias]
def test_graph_lasso_cv(random_state=1):
    # Sample data from a sparse multivariate normal
    dim = 5
    n_samples = 6
    random_state = check_random_state(random_state)
    prec = make_sparse_spd_matrix(dim, alpha=.96,
                                  random_state=random_state)
    cov = linalg.inv(prec)
    X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
    # Capture stdout, to smoke test the verbose mode
    orig_stdout = sys.stdout
    try:
        sys.stdout = StringIO()
        # We need verbose very high so that Parallel prints on stdout
        GraphLassoCV(verbose=100, alphas=5, tol=1e-1).fit(X)
    finally:
        sys.stdout = orig_stdout

    # Smoke test with specified alphas
    GraphLassoCV(alphas=[0.8, 0.5], tol=1e-1, n_jobs=1).fit(X) 
Developer: PacktPublishing, Project: Mastering-Elasticsearch-7.0, Lines: 22, Source: test_graph_lasso.py

Example 14: test_pls_scaling

# Required import: from sklearn import utils [as alias]
# Or: from sklearn.utils import check_random_state [as alias]
def test_pls_scaling():
    # sanity check for scale=True
    n_samples = 1000
    n_targets = 5
    n_features = 10

    rng = check_random_state(0)

    Q = rng.randn(n_targets, n_features)
    Y = rng.randn(n_samples, n_targets)
    X = np.dot(Y, Q) + 2 * rng.randn(n_samples, n_features) + 1
    X *= 1000
    X_scaled = StandardScaler().fit_transform(X)

    pls = pls_.PLSRegression(n_components=5, scale=True)

    pls.fit(X, Y)
    score = pls.score(X, Y)

    pls.fit(X_scaled, Y)
    score_scaled = pls.score(X_scaled, Y)

    assert_approx_equal(score, score_scaled) 
Developer: PacktPublishing, Project: Mastering-Elasticsearch-7.0, Lines: 25, Source: test_pls.py

Example 15: test_iforest_parallel_regression

# Required import: from sklearn import utils [as alias]
# Or: from sklearn.utils import check_random_state [as alias]
def test_iforest_parallel_regression():
    """Check parallel regression."""
    rng = check_random_state(0)

    X_train, X_test, y_train, y_test = train_test_split(boston.data,
                                                        boston.target,
                                                        random_state=rng)

    ensemble = IsolationForest(n_jobs=3,
                               random_state=0).fit(X_train)

    ensemble.set_params(n_jobs=1)
    y1 = ensemble.predict(X_test)
    ensemble.set_params(n_jobs=2)
    y2 = ensemble.predict(X_test)
    assert_array_almost_equal(y1, y2)

    ensemble = IsolationForest(n_jobs=1,
                               random_state=0).fit(X_train)

    y3 = ensemble.predict(X_test)
    assert_array_almost_equal(y1, y3) 
Developer: PacktPublishing, Project: Mastering-Elasticsearch-7.0, Lines: 24, Source: test_iforest.py


Note: The sklearn.utils.check_random_state examples in this article were compiled from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. For distribution and use, please refer to the license of the corresponding project; do not reproduce without permission.