

Python metrics.mean_absolute_error Method Code Examples

This article collects typical usage examples of the sklearn.metrics.mean_absolute_error method in Python. If you are wondering how metrics.mean_absolute_error is used in practice, what it is called with, or what real-world usage looks like, the curated code examples below should help. You can also explore further usage examples of the sklearn.metrics module.


Fifteen code examples of the metrics.mean_absolute_error method are shown below, sorted by popularity by default.
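Before diving into the collected examples, here is a minimal, self-contained usage sketch of the method itself (the array values below are made up purely for illustration):

import numpy as np
from sklearn.metrics import mean_absolute_error

# Hypothetical true values and predictions, for illustration only
y_true = np.array([3.0, -0.5, 2.0, 7.0])
y_pred = np.array([2.5, 0.0, 2.0, 8.0])

# MAE is the mean of the absolute errors: (0.5 + 0.5 + 0.0 + 1.0) / 4 = 0.5
print(mean_absolute_error(y_true, y_pred))  # 0.5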

Example 1: proxy_a_distance

# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import mean_absolute_error [as alias]
# This snippet also relies on: import numpy as np; from sklearn import svm
def proxy_a_distance(source_X, target_X):
    """
    Compute the Proxy-A-Distance of a source/target representation
    """
    nb_source = np.shape(source_X)[0]
    nb_target = np.shape(target_X)[0]

    train_X = np.vstack((source_X, target_X))
    train_Y = np.hstack((np.zeros(nb_source, dtype=int),
                         np.ones(nb_target, dtype=int)))

    clf = svm.LinearSVC(random_state=0)
    clf.fit(train_X, train_Y)
    y_pred = clf.predict(train_X)
    error = metrics.mean_absolute_error(train_Y, y_pred)
    dist = 2 * (1 - 2 * error)
    return dist 
Developer: jindongwang, Project: transferlearning, Lines of code: 19, Source file: BDA.py
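A minimal usage sketch for proxy_a_distance above, assuming its module-level imports (numpy as np, sklearn.svm as svm, sklearn.metrics as metrics) are in place; the two feature matrices are synthetic and only for illustration:

import numpy as np

# Two hypothetical feature matrices standing in for source and target domains
rng = np.random.RandomState(0)
source_X = rng.normal(loc=0.0, size=(100, 5))
target_X = rng.normal(loc=1.0, size=(100, 5))

# A value near 2 means the domains are easy to separate with a linear SVM;
# a value near 0 (or negative) means they look similar to the classifier.
print(proxy_a_distance(source_X, target_X))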

Example 2: calculate_regression_metrics

# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import mean_absolute_error [as alias]
# The snippet below uses the alias: from sklearn import metrics as skmetrics
def calculate_regression_metrics(trained_sklearn_estimator, x_test, y_test):
    """
    Given a trained estimator, calculate metrics.

    Args:
        trained_sklearn_estimator (sklearn.base.BaseEstimator): a scikit-learn estimator that has been `.fit()`
        x_test (numpy.ndarray): A 2d numpy array of the test set features
        y_test (numpy.ndarray): A 1d numpy array of the test set target values

    Returns:
        dict: A dictionary of metrics objects
    """
    # Get predictions
    predictions = trained_sklearn_estimator.predict(x_test)

    # Calculate individual metrics
    mean_squared_error = skmetrics.mean_squared_error(y_test, predictions)
    mean_absolute_error = skmetrics.mean_absolute_error(y_test, predictions)

    result = {'mean_squared_error': mean_squared_error, 'mean_absolute_error': mean_absolute_error}

    return result 
Developer: HealthCatalyst, Project: healthcareai-py, Lines of code: 24, Source file: model_eval.py
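A hedged usage sketch for calculate_regression_metrics above, fitting a toy LinearRegression on synthetic data (the data and model are illustrative only; skmetrics is assumed to be the alias "from sklearn import metrics as skmetrics" used in the original module):

import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split

# Synthetic regression data, purely for illustration
rng = np.random.RandomState(0)
X = rng.rand(200, 3)
y = X @ np.array([1.5, -2.0, 0.5]) + 0.1 * rng.randn(200)
x_train, x_test, y_train, y_test = train_test_split(X, y, random_state=0)

model = LinearRegression().fit(x_train, y_train)
print(calculate_regression_metrics(model, x_test, y_test))
# e.g. {'mean_squared_error': ..., 'mean_absolute_error': ...}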

Example 3: score_regression

# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import mean_absolute_error [as alias]
# Also relies on: from math import sqrt; from sklearn.metrics import r2_score, mean_squared_error
def score_regression(y, y_hat, report=True):
    """
    Create a regression score report (R2, RMSE, MAE)
    :param y: true target values
    :param y_hat: predicted values
    :param report: if True, print the report string
    :return: tuple of (mae, report_string)
    """
    r2 = r2_score(y, y_hat)
    rmse = sqrt(mean_squared_error(y, y_hat))
    mae = mean_absolute_error(y, y_hat)

    report_string = "---Regression Score--- \n"
    report_string += "R2 = " + str(r2) + "\n"
    report_string += "RMSE = " + str(rmse) + "\n"
    report_string += "MAE = " + str(mae) + "\n"

    if report:
        print(report_string)

    return mae, report_string 
Developer: mbernico, Project: snape, Lines of code: 22, Source file: score_dataset.py

Example 4: test_metrics_from_list

# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import mean_absolute_error [as alias]
def test_metrics_from_list():
    """
    Check getting functions from a list of metric names
    """
    default = ModelBuilder.metrics_from_list()
    assert default == [
        metrics.explained_variance_score,
        metrics.r2_score,
        metrics.mean_squared_error,
        metrics.mean_absolute_error,
    ]

    specifics = ModelBuilder.metrics_from_list(
        ["sklearn.metrics.adjusted_mutual_info_score", "sklearn.metrics.r2_score"]
    )
    assert specifics == [metrics.adjusted_mutual_info_score, metrics.r2_score] 
Developer: equinor, Project: gordo, Lines of code: 18, Source file: test_builder.py

Example 5: test_experiment_cat_custom_eval

# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import mean_absolute_error [as alias]
def test_experiment_cat_custom_eval(tmpdir_name):
    X, y = make_regression_df(n_samples=1024, n_num_features=10, n_cat_features=2,
                              random_state=0, id_column='user_id')

    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=0)

    params = {
        'max_depth': 8,
        'num_boost_round': 100,
        'eval_metric': 'MAE'
    }

    result = run_experiment(params, X_train, y_train, X_test, tmpdir_name,
                            algorithm_type='cat', eval_func=mean_absolute_error)

    assert mean_absolute_error(y_train, result.oof_prediction) == result.metrics[-1]
    _check_file_exists(tmpdir_name) 
Developer: nyanp, Project: nyaggle, Lines of code: 19, Source file: test_run.py

Example 6: test_multioutput_regression

# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import mean_absolute_error [as alias]
def test_multioutput_regression():
    y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
    y_pred = np.array([[0, 0, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]])

    error = mean_squared_error(y_true, y_pred)
    assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)

    error = mean_squared_log_error(y_true, y_pred)
    assert_almost_equal(error, 0.200, decimal=2)

    # mean_absolute_error and mean_squared_error are equal because
    # it is a binary problem.
    error = mean_absolute_error(y_true, y_pred)
    assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)

    error = r2_score(y_true, y_pred, multioutput='variance_weighted')
    assert_almost_equal(error, 1. - 5. / 2)
    error = r2_score(y_true, y_pred, multioutput='uniform_average')
    assert_almost_equal(error, -.875) 
Developer: PacktPublishing, Project: Mastering-Elasticsearch-7.0, Lines of code: 21, Source file: test_regression.py

Example 7: test_regression_metrics_at_limits

# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import mean_absolute_error [as alias]
def test_regression_metrics_at_limits():
    assert_almost_equal(mean_squared_error([0.], [0.]), 0.00, 2)
    assert_almost_equal(mean_squared_log_error([0.], [0.]), 0.00, 2)
    assert_almost_equal(mean_absolute_error([0.], [0.]), 0.00, 2)
    assert_almost_equal(median_absolute_error([0.], [0.]), 0.00, 2)
    assert_almost_equal(max_error([0.], [0.]), 0.00, 2)
    assert_almost_equal(explained_variance_score([0.], [0.]), 1.00, 2)
    assert_almost_equal(r2_score([0., 1], [0., 1]), 1.00, 2)
    assert_raises_regex(ValueError, "Mean Squared Logarithmic Error cannot be "
                        "used when targets contain negative values.",
                        mean_squared_log_error, [-1.], [-1.])
    assert_raises_regex(ValueError, "Mean Squared Logarithmic Error cannot be "
                        "used when targets contain negative values.",
                        mean_squared_log_error, [1., 2., 3.], [1., -2., 3.])
    assert_raises_regex(ValueError, "Mean Squared Logarithmic Error cannot be "
                        "used when targets contain negative values.",
                        mean_squared_log_error, [1., -2., 3.], [1., 2., 3.]) 
Developer: PacktPublishing, Project: Mastering-Elasticsearch-7.0, Lines of code: 19, Source file: test_regression.py

Example 8: test_regression_custom_weights

# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import mean_absolute_error [as alias]
def test_regression_custom_weights():
    y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
    y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]

    msew = mean_squared_error(y_true, y_pred, multioutput=[0.4, 0.6])
    maew = mean_absolute_error(y_true, y_pred, multioutput=[0.4, 0.6])
    rw = r2_score(y_true, y_pred, multioutput=[0.4, 0.6])
    evsw = explained_variance_score(y_true, y_pred, multioutput=[0.4, 0.6])

    assert_almost_equal(msew, 0.39, decimal=2)
    assert_almost_equal(maew, 0.475, decimal=3)
    assert_almost_equal(rw, 0.94, decimal=2)
    assert_almost_equal(evsw, 0.94, decimal=2)

    # Handling msle separately as it does not accept negative inputs.
    y_true = np.array([[0.5, 1], [1, 2], [7, 6]])
    y_pred = np.array([[0.5, 2], [1, 2.5], [8, 8]])
    msle = mean_squared_log_error(y_true, y_pred, multioutput=[0.3, 0.7])
    msle2 = mean_squared_error(np.log(1 + y_true), np.log(1 + y_pred),
                               multioutput=[0.3, 0.7])
    assert_almost_equal(msle, msle2, decimal=2) 
Developer: PacktPublishing, Project: Mastering-Elasticsearch-7.0, Lines of code: 23, Source file: test_regression.py
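The weighted MAE of 0.475 asserted above can be reproduced by averaging the per-output MAEs with the given weights; a small verification sketch:

import numpy as np
from sklearn.metrics import mean_absolute_error

y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]

# Per-output MAEs: [0.25, 0.625]
per_output = mean_absolute_error(y_true, y_pred, multioutput='raw_values')

# Weighted average: 0.4 * 0.25 + 0.6 * 0.625 = 0.475
print(np.average(per_output, weights=[0.4, 0.6]))  # 0.475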

Example 9: test_trigonometric

# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import mean_absolute_error [as alias]
def test_trigonometric():
    """Check that using trig functions work and that results differ"""

    est1 = SymbolicRegressor(population_size=100, generations=2,
                             random_state=0)
    est1.fit(boston.data[:400, :], boston.target[:400])
    est1 = mean_absolute_error(est1.predict(boston.data[400:, :]),
                               boston.target[400:])

    est2 = SymbolicRegressor(population_size=100, generations=2,
                             function_set=['add', 'sub', 'mul', 'div',
                                           'sin', 'cos', 'tan'],
                             random_state=0)
    est2.fit(boston.data[:400, :], boston.target[:400])
    est2 = mean_absolute_error(est2.predict(boston.data[400:, :]),
                               boston.target[400:])

    assert(abs(est1 - est2) > 0.01) 
Developer: trevorstephens, Project: gplearn, Lines of code: 20, Source file: test_genetic.py

Example 10: test_subsample

# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import mean_absolute_error [as alias]
def test_subsample():
    """Check that subsample work and that results differ"""

    est1 = SymbolicRegressor(population_size=100, generations=2,
                             max_samples=1.0, random_state=0)
    est1.fit(boston.data[:400, :], boston.target[:400])
    est1 = mean_absolute_error(est1.predict(boston.data[400:, :]),
                               boston.target[400:])

    est2 = SymbolicRegressor(population_size=100, generations=2,
                             max_samples=0.5, random_state=0)
    est2.fit(boston.data[:400, :], boston.target[:400])
    est2 = mean_absolute_error(est2.predict(boston.data[400:, :]),
                               boston.target[400:])

    assert(abs(est1 - est2) > 0.01) 
Developer: trevorstephens, Project: gplearn, Lines of code: 18, Source file: test_genetic.py

Example 11: test_parsimony_coefficient

# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import mean_absolute_error [as alias]
def test_parsimony_coefficient():
    """Check that parsimony coefficients work and that results differ"""

    est1 = SymbolicRegressor(population_size=100, generations=2,
                             parsimony_coefficient=0.001, random_state=0)
    est1.fit(boston.data[:400, :], boston.target[:400])
    est1 = mean_absolute_error(est1.predict(boston.data[400:, :]),
                               boston.target[400:])

    est2 = SymbolicRegressor(population_size=100, generations=2,
                             parsimony_coefficient='auto', random_state=0)
    est2.fit(boston.data[:400, :], boston.target[:400])
    est2 = mean_absolute_error(est2.predict(boston.data[400:, :]),
                               boston.target[400:])

    assert(abs(est1 - est2) > 0.01) 
Developer: trevorstephens, Project: gplearn, Lines of code: 18, Source file: test_genetic.py

Example 12: pla

# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import mean_absolute_error [as alias]
# Also relies on: import numpy as np; from math import sqrt; from scipy.interpolate import splrep, splev; from scipy import interpolate
def pla(data, period=15):
    N = int(len(data)/period)
    orig_x = range(0,len(data))
    tck = splrep(orig_x, data,s=0)
    test_xs = np.linspace(0,len(data),N)
    spline_ys = splev(test_xs, tck)
    spline_yps = splev(test_xs, tck, der=1)
    xi = np.unique(tck[0])
    yi = [[splev(x, tck, der=j) for j in range(3)] for x in xi]  # range instead of Python 2 xrange
    # Note: PiecewisePolynomial was removed from recent SciPy; BPoly.from_derivatives is the modern replacement
    P = interpolate.PiecewisePolynomial(xi, yi, orders=1)
    test_ys = P(test_xs)
    #inter_y = interp0(test_xs, test_ys, orig_x)
    inter_y = interp1(test_xs, test_ys, orig_x)  # interp1 is a helper defined elsewhere in the source project
    
    mae = sqrt(mean_absolute_error(inter_y, data))
    #       mae = np.var(inter_y-data)
    return mae

#def paa(data, period=15): 
Developer: plastering, Project: plastering, Lines of code: 21, Source file: feature_extractor.py

Example 13: paa

# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import mean_absolute_error [as alias]
# Also relies on: import numpy as np; import numpy.matlib; from copy import deepcopy; from math import sqrt
def paa(data, period=15):
    numCoeff = int(len(data)/period)
    data = data[:numCoeff*period]
    data = data[:int(len(data)/numCoeff)*numCoeff]
    origData = deepcopy(data)
    N = len(data)
    segLen = int(N/numCoeff)
    sN = np.reshape(data, (numCoeff, segLen))
    g = lambda data: np.mean(data)
    #       avg = np.mean(sN)
    avg = list(map(g, sN))  # materialize the per-segment means (map returns an iterator in Python 3)
    data = np.matlib.repmat(avg, segLen, 1)
    data = data.ravel(order='F')
#       plt.plot(data)
#       plt.plot(origData)
#       plt.show()
#rmse = sqrt(mean_squared_error(data, origData))
    mae = sqrt(mean_absolute_error(data, origData))
#       mae = np.var(origData-data)
    return mae 
Developer: plastering, Project: plastering, Lines of code: 22, Source file: feature_extractor.py

Example 14: eva_regress

# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import mean_absolute_error [as alias]
def eva_regress(y_true, y_pred):
    """Evaluation
    evaluate the predicted result.

    # Arguments
        y_true: List/ndarray, true data.
        y_pred: List/ndarray, predicted data.
    """

    mape = MAPE(y_true, y_pred)
    vs = metrics.explained_variance_score(y_true, y_pred)
    mae = metrics.mean_absolute_error(y_true, y_pred)
    mse = metrics.mean_squared_error(y_true, y_pred)
    r2 = metrics.r2_score(y_true, y_pred)
    print('explained_variance_score:%f' % vs)
    print('mape:%f%%' % mape)
    print('mae:%f' % mae)
    print('mse:%f' % mse)
    print('rmse:%f' % math.sqrt(mse))
    print('r2:%f' % r2) 
Developer: zbj6633, Project: TrafficFlowPrediction, Lines of code: 22, Source file: test.py

Example 15: macro_mae

# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import mean_absolute_error [as alias]
def macro_mae(y_test, y_pred, classes):
    cat_to_class_mapping = {v: int(k) for k, v in
                            get_labels_to_categories_map(classes).items()}
    _y_test = [cat_to_class_mapping[y] for y in y_test]
    _y_pred = [cat_to_class_mapping[y] for y in y_pred]

    c = Counter(_y_pred)
    print(c)

    classes = set(_y_test)
    micro_m = {}
    for c in classes:
        class_sentences = [(t, p) for t, p in zip(_y_test, _y_pred) if t == c]
        yt = [y[0] for y in class_sentences]
        yp = [y[1] for y in class_sentences]
        micro_m[c] = mean_absolute_error(yt, yp)

    # pprint.pprint(sorted(micro_m.items(), key=lambda x: x[1], reverse=True))

    return numpy.mean(list(micro_m.values())) 
Developer: cbaziotis, Project: datastories-semeval2017-task4, Lines of code: 22, Source file: generic.py


Note: The sklearn.metrics.mean_absolute_error examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors, and any distribution or use should follow the corresponding project's license. Do not reproduce without permission.