

Python lightgbm.LGBMRegressor Code Examples

This article collects typical usage examples of lightgbm.LGBMRegressor in Python. If you are wondering how lightgbm.LGBMRegressor is used in practice, the curated code examples below should help. You can also explore further usage examples from the lightgbm package.


A total of 15 code examples of lightgbm.LGBMRegressor are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
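Before turning to the project-specific examples, here is a minimal, self-contained sketch of the basic fit/predict workflow of lightgbm.LGBMRegressor. It is not taken from any of the projects below; the synthetic data and hyperparameter values are illustrative assumptions only.

import numpy as np
import lightgbm as lgb
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error

# Generate a small synthetic regression problem (illustrative assumption, not real data)
rng = np.random.default_rng(42)
X = rng.normal(size=(500, 10))
y = 2.0 * X[:, 0] - X[:, 1] + rng.normal(scale=0.1, size=500)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Fit a gradient-boosted regressor with commonly used, assumed hyperparameters
model = lgb.LGBMRegressor(boosting_type='gbdt', objective='regression',
                          n_estimators=100, learning_rate=0.1)
model.fit(X_train, y_train)

# Evaluate on the held-out split
pred = model.predict(X_test)
print('test MSE:', mean_squared_error(y_test, pred))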

Example 1: Train

# Required module: import lightgbm [as alias]
# Or: from lightgbm import LGBMRegressor [as alias]
def Train(data, modelcount, censhu, yanzhgdata):
    model = lgbm.LGBMRegressor(boosting_type='gbdt', objective='regression', num_leaves=1200,
                                learning_rate=0.17, n_estimators=modelcount, max_depth=censhu,
                                metric='rmse', bagging_fraction=0.8, feature_fraction=0.8, reg_lambda=0.9)

    model.fit(data[:, :-1], data[:, -1])
    # Predictions on the training data
    train_out = model.predict(data[:, :-1])
    # Compute the MSE
    train_mse = mse(data[:, -1], train_out)

    # Predictions on the validation data
    add_yan = model.predict(yanzhgdata[:, :-1])
    # Compute the MSE
    add_mse = mse(yanzhgdata[:, -1], add_yan)
    print(train_mse, add_mse)
    return train_mse, add_mse

# Function that determines the final combination
Developer: Anfany, Project: Machine-Learning-for-Beginner-by-Python3, Lines: 21, Source: LightGBM_Regression_pm25.py

Example 2: get_feature_importances

# Required module: import lightgbm [as alias]
# Or: from lightgbm import LGBMRegressor [as alias]
def get_feature_importances(data, shuffle, cats=[], seed=None):
    # Gather real features
    train_features = [f for f in data if f not in [target] + cols2ignore]

    # Shuffle target if required
    y = data[target].copy()
    if shuffle:
        y = data[target].copy().sample(frac=1.0, random_state=seed + 4)
    from h2oaicore.lightgbm_dynamic import got_cpu_lgb, got_gpu_lgb
    import lightgbm as lgbm
    if is_regression:
        model = lgbm.LGBMRegressor(random_state=seed, importance_type=importance, **lgbm_params)
    else:
        model = lgbm.LGBMClassifier(random_state=seed, importance_type=importance, **lgbm_params)
        y = LabelEncoder().fit_transform(y)
    # Fit LightGBM in RF mode, yes it's quicker than sklearn RandomForest
    model.fit(data[train_features], y, categorical_feature=cats)
    # Get feature importances
    imp_df = pd.DataFrame()
    imp_df["feature"] = list(train_features)
    imp_df["importance"] = model.feature_importances_

    return imp_df 
Developer: h2oai, Project: driverlessai-recipes, Lines: 25, Source: feature_selection.py

Example 3: _dispatch_gbdt_class

# Required module: import lightgbm [as alias]
# Or: from lightgbm import LGBMRegressor [as alias]
def _dispatch_gbdt_class(algorithm_type: str, type_of_target: str):
    is_regression = type_of_target == 'continuous'

    if algorithm_type == 'lgbm':
        requires_lightgbm()
        from lightgbm import LGBMClassifier, LGBMRegressor
        return LGBMRegressor if is_regression else LGBMClassifier
    elif algorithm_type == 'cat':
        requires_catboost()
        from catboost import CatBoostClassifier, CatBoostRegressor
        return CatBoostRegressor if is_regression else CatBoostClassifier
    else:
        requires_xgboost()
        assert algorithm_type == 'xgb'
        from xgboost import XGBClassifier, XGBRegressor
        return XGBRegressor if is_regression else XGBClassifier 
Developer: nyanp, Project: nyaggle, Lines: 18, Source: run.py

Example 4: test_04_lgbm_regressor

# Required module: import lightgbm [as alias]
# Or: from lightgbm import LGBMRegressor [as alias]
def test_04_lgbm_regressor(self):
        print("\ntest 04 (lgbm regressor with preprocessing)\n")
        auto = pd.read_csv('nyoka/tests/auto-mpg.csv')
        X = auto.drop(['mpg'], axis=1)
        y = auto['mpg']

        feature_names = [name for name in auto.columns if name not in ('mpg')]
        target_name='mpg'
        x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=101)
        pd.DataFrame(data=x_test, columns=feature_names).to_csv("test.csv",index=False)
        pipeline_obj = Pipeline([
            ('mapper', DataFrameMapper([
                ('car name', CountVectorizer()),
                (['displacement'],[StandardScaler()]) 
            ])),
            ('lgbmr',LGBMRegressor())
        ])
        pipeline_obj.fit(x_train,y_train)
        file_name = "test04lgbm.pmml"
        lgb_to_pmml(pipeline_obj, feature_names, 'mpg', file_name)
        model_name  = self.adapa_utility.upload_to_zserver(file_name)
        predictions, _ = self.adapa_utility.score_in_zserver(model_name, "test.csv")
        predictions = numpy.array(predictions)
        model_pred = pipeline_obj.predict(x_test)
        self.assertEqual(self.adapa_utility.compare_predictions(predictions, model_pred), True) 
Developer: nyoka-pmml, Project: nyoka, Lines: 27, Source: testScoreWithAdapaLgbm.py

Example 5: test_regressor

# Required module: import lightgbm [as alias]
# Or: from lightgbm import LGBMRegressor [as alias]
def test_regressor(output, client, listen_port):  # noqa
    X, y, w, dX, dy, dw = _create_data('regression', output=output)

    a = dlgbm.LGBMRegressor(time_out=5, local_listen_port=listen_port, seed=42)
    a = a.fit(dX, dy, client=client, sample_weight=dw)
    p1 = a.predict(dX, client=client)
    if output != 'dataframe':
        s1 = r2_score(dy, p1)
    p1 = p1.compute()

    b = lightgbm.LGBMRegressor(seed=42)
    b.fit(X, y, sample_weight=w)
    s2 = b.score(X, y)
    p2 = b.predict(X)

    # Scores should be the same
    if output != 'dataframe':
        assert_eq(s1, s2, atol=.01)

    # Predictions should be roughly the same
    assert_eq(y, p1, rtol=1., atol=50.)
    assert_eq(y, p2, rtol=1., atol=50.) 
Developer: dask, Project: dask-lightgbm, Lines: 24, Source: test_core.py

Example 6: test_regressor_quantile

# Required module: import lightgbm [as alias]
# Or: from lightgbm import LGBMRegressor [as alias]
def test_regressor_quantile(output, client, listen_port, alpha):  # noqa
    X, y, w, dX, dy, dw = _create_data('regression', output=output)

    a = dlgbm.LGBMRegressor(local_listen_port=listen_port, seed=42, objective='quantile', alpha=alpha)
    a = a.fit(dX, dy, client=client, sample_weight=dw)
    p1 = a.predict(dX, client=client).compute()
    q1 = np.count_nonzero(y < p1) / y.shape[0]

    b = lightgbm.LGBMRegressor(seed=42, objective='quantile', alpha=alpha)
    b.fit(X, y, sample_weight=w)
    p2 = b.predict(X)
    q2 = np.count_nonzero(y < p2) / y.shape[0]

    # Quantiles should be right
    assert np.isclose(q1, alpha, atol=.1)
    assert np.isclose(q2, alpha, atol=.1)
Developer: dask, Project: dask-lightgbm, Lines: 18, Source: test_core.py

Example 7: recspre

# Required module: import lightgbm [as alias]
# Or: from lightgbm import LGBMRegressor [as alias]
def recspre(exstr, predata, datadict, zhe, count=100):
    tree, te = exstr.split('-')
    model = lgbm.LGBMRegressor(objective='regression', learning_rate=0.15, num_leaves=1200,
                               n_estimators=int(tree), max_depth=int(te),
                               metric='rmse', bagging_fraction=0.8, feature_fraction=0.8, reg_lambda=0.9)
    model.fit(datadict[zhe]['train'][:, :-1], datadict[zhe]['train'][:, -1])

    # Predict
    yucede = model.predict(predata[:, :-1])
    # For ease of display, select 100 samples to show
    zongleng = np.arange(len(yucede))
    randomnum = np.random.choice(zongleng, count, replace=False)

    yucede_se = list(np.array(yucede)[randomnum])

    yuce_re = list(np.array(predata[:, -1])[randomnum])

    # Comparison plot
    plt.figure(figsize=(17, 9))
    plt.subplot(2, 1, 1)
    plt.plot(list(range(len(yucede_se))), yucede_se, 'r--', label='预测', lw=2)
    plt.scatter(list(range(len(yuce_re))), yuce_re, c='b', marker='.', label='真实', lw=2)
    plt.xlim(-1, count + 1)
    plt.legend()
    plt.title('预测和真实值对比[最大树数%d]' % int(tree))

    plt.subplot(2, 1, 2)
    plt.plot(list(range(len(yucede_se))), np.array(yuce_re) - np.array(yucede_se), 'k--', marker='s', label='真实-预测', lw=2)
    plt.legend()
    plt.title('预测和真实值相对误差')

    plt.savefig(r'C:\Users\GWT9\Desktop\duibi_lightgbm.jpg')
    return '预测真实对比完毕'

# The final main function
Developer: Anfany, Project: Machine-Learning-for-Beginner-by-Python3, Lines: 37, Source: LightGBM_Regression_pm25.py

Example 8: test_regression

# Required module: import lightgbm [as alias]
# Or: from lightgbm import LGBMRegressor [as alias]
def test_regression():
    estimator = lightgbm.LGBMRegressor(n_estimators=2, random_state=1,
                                       max_depth=1)
    utils.get_regression_model_trainer()(estimator)

    assembler = assemblers.LightGBMModelAssembler(estimator)
    actual = assembler.assemble()

    expected = ast.BinNumExpr(
        ast.IfExpr(
            ast.CompExpr(
                ast.FeatureRef(5),
                ast.NumVal(6.918),
                ast.CompOpType.GT),
            ast.NumVal(24.011454621684155),
            ast.NumVal(22.289277544391084)),
        ast.IfExpr(
            ast.CompExpr(
                ast.FeatureRef(12),
                ast.NumVal(9.63),
                ast.CompOpType.GT),
            ast.NumVal(-0.49461212269771115),
            ast.NumVal(0.7174324413014594)),
        ast.BinNumOpType.ADD)

    assert utils.cmp_exprs(actual, expected) 
Developer: BayesWitnesses, Project: m2cgen, Lines: 28, Source: test_lightgbm.py

Example 9: LightGBM_First

# Required module: import lightgbm [as alias]
# Or: from lightgbm import LGBMRegressor [as alias]
def LightGBM_First(self, data, max_depth=9, n_estimators=380):
        model = lgbm.LGBMRegressor(boosting_type='gbdt', objective='regression', num_leaves=1200,
                                   learning_rate=0.17, n_estimators=n_estimators, max_depth=max_depth,
                                   metric='rmse', bagging_fraction=0.8, feature_fraction=0.8, reg_lambda=0.9)
        model.fit(data['train'][:, :-1], data['train'][:, -1])
        # Note the difference between storing validation-set results and prediction-set results
        # Predictions on the training set
        xul = model.predict(data['train'][:, :-1])
        # Predictions on the validation set
        yanre = model.predict(data['test'][:, :-1])
        # Predictions on the prediction set
        prer = model.predict(data['predict'][:, :-1])
        # Store them
        self.yanzhneg_pr.append(yanre)
        self.predi.append(prer)
        # Compute the errors for the training, validation, and prediction sets
        # After each fold, the errors of the training, validation, and prediction data must be computed
        xx = self.RMSE(xul, data['train'][:, -1])
        yy = self.RMSE(yanre, data['test'][:, -1])
        pp = self.RMSE(prer, data['predict'][:, -1])
        # Store the errors
        self.error_dict['LightGBM'] = [xx, yy, pp]
        # Ground-truth outputs of the validation set
        self.yanzhneg_real = data['test'][:, -1]

        # Ground-truth outputs of the prediction set
        self.preal = data['predict'][:, -1]
        return print('1层中的LightGBM运行完毕')

    # XGBoost 
Developer: Anfany, Project: Machine-Learning-for-Beginner-by-Python3, Lines: 32, Source: Blending_Regression_pm25.py

Example 10: test_regression_random_forest

# Required module: import lightgbm [as alias]
# Or: from lightgbm import LGBMRegressor [as alias]
def test_regression_random_forest():
    estimator = lightgbm.LGBMRegressor(boosting_type="rf", n_estimators=2,
                                       random_state=1, max_depth=1,
                                       subsample=0.7, subsample_freq=1)
    utils.get_regression_model_trainer()(estimator)

    assembler = assemblers.LightGBMModelAssembler(estimator)
    actual = assembler.assemble()

    expected = ast.BinNumExpr(
        ast.BinNumExpr(
            ast.IfExpr(
                ast.CompExpr(
                    ast.FeatureRef(5),
                    ast.NumVal(6.954000000000001),
                    ast.CompOpType.GT),
                ast.NumVal(37.24347877367631),
                ast.NumVal(19.936999995530854)),
            ast.IfExpr(
                ast.CompExpr(
                    ast.FeatureRef(5),
                    ast.NumVal(6.971500000000001),
                    ast.CompOpType.GT),
                ast.NumVal(38.48600037864964),
                ast.NumVal(20.183783757300255)),
            ast.BinNumOpType.ADD),
        ast.NumVal(0.5),
        ast.BinNumOpType.MUL)

    assert utils.cmp_exprs(actual, expected) 
Developer: BayesWitnesses, Project: m2cgen, Lines: 32, Source: test_lightgbm.py

Example 11: test_03_lgbm_regressor

# Required module: import lightgbm [as alias]
# Or: from lightgbm import LGBMRegressor [as alias]
def test_03_lgbm_regressor(self):
        print("\ntest 03 (lgbm regressor without preprocessing)\n")
        model = LGBMRegressor()
        pipeline_obj = Pipeline([
            ("model", model)
        ])
        pipeline_obj.fit(self.X,self.Y)
        file_name = "test03lgbm.pmml"
        lgb_to_pmml(pipeline_obj, self.features, 'Species', file_name)
        model_name  = self.adapa_utility.upload_to_zserver(file_name)
        predictions, _ = self.adapa_utility.score_in_zserver(model_name, self.test_file)
        model_pred = pipeline_obj.predict(self.X)
        predictions = numpy.array(predictions)
        self.assertEqual(self.adapa_utility.compare_predictions(predictions, model_pred), True) 
Developer: nyoka-pmml, Project: nyoka, Lines: 16, Source: testScoreWithAdapaLgbm.py

Example 12: fit

# Required module: import lightgbm [as alias]
# Or: from lightgbm import LGBMRegressor [as alias]
def fit(self, X, y=None, sample_weight=None, client=None, **kwargs):
        if client is None:
            client = default_client()

        model_factory = lightgbm.LGBMRegressor
        params = self.get_params(True)
        model = train(client, X, y, params, model_factory, sample_weight, **kwargs)

        self.set_params(**model.get_params())
        self._copy_extra_params(model, self)

        return self 
Developer: dask, Project: dask-lightgbm, Lines: 14, Source: core.py

Example 13: to_local

# Required module: import lightgbm [as alias]
# Or: from lightgbm import LGBMRegressor [as alias]
def to_local(self):
        model = lightgbm.LGBMRegressor(**self.get_params())
        self._copy_extra_params(self, model)
        return model 
Developer: dask, Project: dask-lightgbm, Lines: 6, Source: core.py

Example 14: test_regressor_local_predict

# Required module: import lightgbm [as alias]
# Or: from lightgbm import LGBMRegressor [as alias]
def test_regressor_local_predict(client, listen_port):  # noqa
    X, y, w, dX, dy, dw = _create_data('regression', output='array')

    a = dlgbm.LGBMRegressor(local_listen_port=listen_port, seed=42)
    a = a.fit(dX, dy, sample_weight=dw, client=client)
    p1 = a.predict(dX)
    p2 = a.to_local().predict(X)
    s1 = r2_score(dy, p1)
    p1 = p1.compute()
    s2 = a.to_local().score(X, y)
    print(s1)

    # Predictions and scores should be the same
    assert_eq(p1, p2)
    assert np.isclose(s1, s2)
Developer: dask, Project: dask-lightgbm, Lines: 17, Source: test_core.py

Example 15: _checkGBDTRegressor

# Required module: import lightgbm [as alias]
# Or: from lightgbm import LGBMRegressor [as alias]
def _checkGBDTRegressor(gbdt):
	if hasattr(gbdt, "apply"):
		return gbdt
	else:
		try:
			from lightgbm import LGBMRegressor
			if isinstance(gbdt, LGBMRegressor):
				return gbdt
		except ImportError:
			pass
	raise ValueError("GBDT class " + _class_name(gbdt) + " is not supported") 
Developer: jpmml, Project: sklearn2pmml, Lines: 13, Source: __init__.py


Note: The lightgbm.LGBMRegressor examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers, and copyright of the source code belongs to the original authors. For distribution and use, please refer to the license of the corresponding project. Do not reproduce without permission.