

Python metrics.mean_absolute_error Function Code Examples

This article collects typical usage examples of the sklearn.metrics.mean_absolute_error function in Python. If you are unsure what mean_absolute_error does, which arguments it takes, or how it is used in practice, the hand-picked examples below should help.


The sections below show 15 code examples of the mean_absolute_error function, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
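Before diving into the examples, here is a minimal, self-contained sketch of the basic call pattern (the arrays below are made-up illustration data, not taken from any of the projects listed):

import numpy as np
from sklearn.metrics import mean_absolute_error

y_true = np.array([3.0, -0.5, 2.0, 7.0])   # ground-truth values
y_pred = np.array([2.5, 0.0, 2.0, 8.0])    # model predictions

# MAE is the mean of |y_true - y_pred|: (0.5 + 0.5 + 0.0 + 1.0) / 4 = 0.5
print(mean_absolute_error(y_true, y_pred))  # 0.5

By convention the ground-truth array is passed first and the predictions second; since the metric is symmetric in value, some of the examples below swap the two arguments without changing the result.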

Example 1: compute_mse

def compute_mse(model, x_train_current_tmp, YTrain, x_test_current_tmp, YTest, score, values_TM=[]):
    model.fit(x_train_current_tmp, YTrain)
    y_pred_train = model.predict(x_train_current_tmp)
    y_pred_test = model.predict(x_test_current_tmp)

    if len(values_TM)!=0:
        abs_error_train = 100.*mean_absolute_error(YTrain,y_pred_train)*len(YTrain)/(89.7* values_TM[0, 0] * values_TM[0,1])
        print("abs train", abs_error_train)

        abs_error_test = 100.*mean_absolute_error(YTest,y_pred_test)*len(YTest)/(89.7* values_TM[1, 0] * values_TM[1,1])
        print("abs test", abs_error_test)

        mse_error_train = 100.*np.sqrt(mean_squared_error(YTrain,y_pred_train)*len(YTrain)/(values_TM[0, 0] * values_TM[0, 1]))/(89.7)
        print("mean squared error train", mse_error_train )

        mse_error_test = 100.*np.sqrt(mean_squared_error(YTest,y_pred_test)*len(YTest)/(values_TM[1, 0] * values_TM[1, 1]))/(89.7)
        print("mean squared error test", mse_error_test )

    if score=="mean_squared_error":
        new_loss = mean_squared_error(YTest,y_pred_test)
    elif score== "mean_absolute_error":
        new_loss = mean_absolute_error(YTest,y_pred_test)
    else:
        new_loss = r2_score(YTest,y_pred_test)
    beta = model.coef_

    if x_train_current_tmp.shape[1]==1:
        beta = np.array([beta])
    beta = beta.reshape([len(beta),1])

    return new_loss, beta
Developer: marty10, Project: LASSO, Lines: 31, Source: Lasso_utils.py

Example 2: make_model

def make_model(data,tc):

    train_data = data.sample(frac=.8)
    test_data = data.drop(train_data.index)
    train_y = train_data['T/Tc']
    train_X = train_data.drop(['T/Tc','temperature'], axis=1)
    test_y = test_data['T/Tc']
    test_X = test_data.drop(['T/Tc','temperature'], axis=1)

#    model = XGBClassifier(n_estimators = 1000,max_depth=8, learning_rate=0.05)
#    model.fit(train_X, train_y, early_stopping_rounds=10,
#                 eval_set=[(test_X, test_y)], verbose=True)
#    xgb.plot_tree(model)

    model = svm.SVC(kernel='rbf', gamma=1, C=1, verbose = True)
    model.fit(train_X, train_y)
    predictions = model.predict(test_X)
    print("Mean Absolute Error : " + str(mean_absolute_error(np.array(predictions), test_y)))

    train_y = train_data['temperature']/tc
    test_y = test_data['temperature']/tc

#    model2 = XGBRegressor(n_estimators = 1000,max_depth=8, learning_rate=0.05)
#    model2.fit(train_X, train_y, early_stopping_rounds=10,eval_metric='mae',
#                 eval_set=[(test_X, test_y)], verbose=True)

    model2 = svm.SVR(kernel='rbf', gamma=.5, C=1, verbose = True)
    model2.fit(train_X, train_y)

    predictions = model2.predict(test_X)
    print("Mean Absolute Error : " + str(mean_absolute_error(np.array(predictions), test_y)))

    return [model,model2]
Developer: aramamurti, Project: BEC-monopoles, Lines: 33, Source: ml_analysis.py

Example 3: prediction_performance

def prediction_performance(model, Xtest, Ytest, numberCategories):
    # Calculate metric for logistic regression performance.
    if(numberCategories == 1):
        # Get metrics for binary classification.
        YDistribution = model.predict_proba(Xtest)[:,1]
        YClassification = model.predict(Xtest)
        auc = roc_auc_score(Ytest, YDistribution)
        print("AUC", auc)
        MAE = mean_absolute_error(Ytest, YDistribution)
        print("MAE", MAE)
        accuracy = 1 - mean_absolute_error(YClassification, Ytest)
        print("Accuracy", accuracy)
        metrics = [accuracy, auc, MAE]
    else:
        # Get metric for multiple class classification.
        YPredictions = model.predict(Xtest)
        YDistribution = model.predict_proba(Xtest)
        YTestLabels = label_data(Ytest)
        accuracy = model.score(Xtest, YTestLabels)
        print("Accuracy", accuracy)
        avAUC = evaluate_auc_score(model, Xtest, Ytest)
        print("Av AUC", avAUC)
        #auc = roc_auc_score(Ytest, YPredictions)
        MAE = mean_absolute_error(Ytest, YDistribution)
        print("MAE", MAE)
        metrics = [accuracy, avAUC, MAE]
    return metrics
Developer: hughheadley, Project: pookeyPython, Lines: 27, Source: logisticTraining.py

Example 4: test_regressor

def test_regressor(train, test, feature_extractor, target_transformer, regressor):
    (train_raw_X, train_raw_y) = (train, train['SalaryNormalized'])
    (test_raw_X, test_raw_y) = (test, test['SalaryNormalized'])

    print('feature extraction ...')
    train_y = target_transformer.transform(train_raw_y)
    test_y = target_transformer.transform(test_raw_y)
    train_X = feature_extractor.fit_transform(train_raw_X, train_y)
    test_X = feature_extractor.transform(test_raw_X)

    print('fit regression model ...')
    try:
        regressor.fit(train_X, train_y)
        train_raw_yhat = target_transformer.r_transform(regressor.predict(train_X))
        test_raw_yhat = target_transformer.r_transform(regressor.predict(test_X))
    except TypeError:
        # some regressors require dense input, so fall back to a dense array
        regressor.fit(train_X.toarray(), train_y)
        train_raw_yhat = target_transformer.r_transform(regressor.predict(train_X.toarray()))
        test_raw_yhat = target_transformer.r_transform(regressor.predict(test_X.toarray()))

    print('evaluate error metrics ...')
    train_error = metrics.mean_absolute_error(train_raw_y, train_raw_yhat)
    test_error = metrics.mean_absolute_error(test_raw_y, test_raw_yhat)
    print('Train error: ', train_error)
    print('Test error:', test_error)
Developer: alyssonbispo, Project: fun_with_kaggle, Lines: 26, Source: test_models.py

Example 5: predict_variance_inf_phase1

def predict_variance_inf_phase1(budget, hum_train_means, temp_train_means, hum_train_vars, temp_train_vars):
    """Method to make predictions based on max-variance active inference."""         
    start_hum = 0
    window_hum = None
    window_temp = None
    i = 0

    hum_preds = np.ones((50, 96))
    temp_preds = np.ones((50, 96))

    for t in global_times:
        if budget > 0:
            window_hum = np.argpartition(hum_train_vars[t], -budget)[-budget:]
            window_temp = np.argpartition(temp_train_vars[t], -budget)[-budget:]
        else:
            window_hum = np.array([])
            window_temp = np.array([])

        hum_pred, temp_pred = makePreds_phase1(window_hum, window_temp, hum_train_means, temp_train_means, i, t)

        hum_preds[:, i] = copy.deepcopy(hum_pred)
        temp_preds[:, i] = copy.deepcopy(temp_pred)
        
        i += 1

    hum_mean_err = mean_absolute_error(hum_test, hum_preds)
    temp_mean_err = mean_absolute_error(temp_test, temp_preds)

    return hum_preds, temp_preds, hum_mean_err, temp_mean_err
Developer: ironhide23586, Project: Sensor-Network-CS583, Lines: 29, Source: Phase3_00.py

Example 6: normalEquation

def normalEquation(features, features_validation, values, values_validation):

    M = numpy.dot(features.T, features)
    print("features transposed times features")
    print(M.shape)
    M = numpy.array(M)
    print("converted to an array")
    print(M.shape)
    M = numpy.linalg.pinv(M)
    print("pseudo-inverse")
    print(M.shape)
    M = numpy.dot(M, features.T)
    print("multiplied by the transpose of features")
    print(M.shape)
    theta = numpy.dot(M, values)
    #M = numpy.linalg.pinv(M)

    print(theta.shape)
    print(features.shape)
    print(theta)

    predictions = numpy.dot(theta, features.T)
    pred_validation = numpy.dot(theta, features_validation.T)

    print(predictions)

    print("MEAN ABSOLUTE ERROR")
    print(mean_absolute_error(values, predictions))

    print("MEAN ABSOLUTE ERROR (validation)")
    print(mean_absolute_error(values_validation, pred_validation))
Developer: victorleal, Project: mlearning, Lines: 32, Source: main.py

Example 7: test_continue_train

def test_continue_train(self):
    X, y = load_boston(True)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
    params = {
        'objective': 'regression',
        'metric': 'l1',
        'verbose': -1
    }
    lgb_train = lgb.Dataset(X_train, y_train, free_raw_data=False)
    lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train, free_raw_data=False)
    init_gbm = lgb.train(params, lgb_train, num_boost_round=20)
    model_name = 'model.txt'
    init_gbm.save_model(model_name)
    evals_result = {}
    gbm = lgb.train(params, lgb_train,
                    num_boost_round=30,
                    valid_sets=lgb_eval,
                    verbose_eval=False,
                    # test custom eval metrics
                    feval=(lambda p, d: ('mae', mean_absolute_error(p, d.get_label()), False)),
                    evals_result=evals_result,
                    init_model='model.txt')
    ret = mean_absolute_error(y_test, gbm.predict(X_test))
    self.assertLess(ret, 3.5)
    self.assertAlmostEqual(evals_result['valid_0']['l1'][-1], ret, places=5)
    for l1, mae in zip(evals_result['valid_0']['l1'], evals_result['valid_0']['mae']):
        self.assertAlmostEqual(l1, mae, places=5)
    os.remove(model_name)
Developer: hubei2626662, Project: LightGBM, Lines: 28, Source: test_engine.py

Example 8: evaluate

def evaluate(ytest, ypred, filename='metrics.txt'):
    true_result = [1 if item > 0.5 else 0 for item in ytest]
    pred_result = [1 if item > 0.5 else 0 for item in ypred]
    
    cm = confusion_matrix(true_result, pred_result)
    print('\nConfusion matrix:')
    print(cm)
    print("\nLoss classified as loss", cm[0][0])
    print("Wins classified as wins", cm[1][1])
    print("Wins classified as loss", cm[1][0])
    print("Loss classified as wins", cm[0][1])
    print('\nAccuracy:\t', accuracy_score(true_result, pred_result))
    print('Precision:\t', precision_score(true_result, pred_result))
    print('Recall: \t', recall_score(true_result, pred_result))
    print('F1 score:\t', f1_score(true_result, pred_result))
    print('Mean absolute error:\t', mean_absolute_error(ytest, ypred))
    
    # print to file
    print("Loss classified as loss", cm[0][0], file=open(filename, "a"))
    print("Wins classified as wins", cm[1][1], file=open(filename, "a"))
    print("Wins classified as loss", cm[1][0], file=open(filename, "a"))
    print("Loss classified as wins", cm[0][1], file=open(filename, "a"))
    print('\nAccuracy:\t', accuracy_score(true_result, pred_result), file=open(filename, "a"))
    print('Precision:\t', precision_score(true_result, pred_result), file=open(filename, "a"))
    print('Recall: \t', recall_score(true_result, pred_result), file=open(filename, "a"))
    print('F1 score:\t', f1_score(true_result, pred_result), file=open(filename, "a"))
    print('Mean absolute error:\t', mean_absolute_error(ytest, ypred), file=open(filename, "a"))
Developer: alexandremcosta, Project: pucker, Lines: 27, Source: learn.py

Example 9: cross_val

def cross_val(regressor_high,regressor_low,classifier,train):
	rows=random.sample(train.index, int(train.shape[0]*0.75))
	sample = train.ix[rows]

	crime=pd.DataFrame(sample.Total_Crime_Risk,dtype=int)
	crime['highcrime']=0
	crime.highcrime[crime.Total_Crime_Risk>crime.Total_Crime_Risk.median()]=1
	crime['GEOGRAPHY_ID']=sample.GEOGRAPHY_ID
	sample=sample.drop(train.columns[[0,-2,-1]], axis=1)
		
	model=classifier.fit(sample, crime.highcrime)
	Highcrime=model.predict(sample)
	Highcrime=np.array(Highcrime)
	sample['predicted_highcrime']=Highcrime
	
	high_areas=sample.ix[sample.predicted_highcrime==1]
	high_areas=pd.merge(high_areas, crime, on='GEOGRAPHY_ID', how= 'inner')
	high_areas_crime=high_areas.Total_Crime_Risk
	high_areas=high_areas.drop(high_areas.columns[[-1,-2,-3]],axis=1)

	low_areas=sample.ix[sample.predicted_highcrime==0]
	low_areas=pd.merge(low_areas, crime, on='GEOGRAPHY_ID', how= 'inner')
	low_areas_crime=low_areas.Total_Crime_Risk
	low_areas=low_areas.drop(low_areas.columns[[-1,-2,-3]],axis=1)

	model_high=regressor_high.fit(high_areas, high_areas_crime)
	high_crime=model_high.predict(high_areas)
	model_low=regressor_low.fit(low_areas, low_areas_crime)
	low_crime=model_low.predict(low_areas)

	high_error=mean_absolute_error(high_areas_crime,high_crime)
	low_error=mean_absolute_error(low_areas_crime,low_crime)
	print(high_error, low_error, (high_error + low_error) / 2)
Developer: manugarri, Project: Data_Science, Lines: 33, Source: main.py

Example 10: tst

def tst(X, Y, k=3, rad=4, mode='k'):
    trX = X[:-1200]
    trY = Y[:-1200]
    tstX = X[-400:]
    tstY = Y[-400:]

    nnlr = NNLR(k, rad, mode)

    nnlr.fit(trX, trY)

    pred = nnlr.predict(trX)
    print('Training Set')
    print('Root Mean Squared Error')
    print(mean_squared_error(trY, pred) ** .5)
    print('Mean Absolute Error')
    print(mean_absolute_error(trY, pred))
    # print(list(zip(pred, trX))[:5])
    print(nnlr.active)

    pred = nnlr.predict(tstX)
    print('Test Set')
    print('Root Mean Squared Error')
    print(mean_squared_error(tstY, pred) ** .5)
    print('Mean Absolute Error')
    print(mean_absolute_error(tstY, pred))
    # print(list(zip(pred, tstY))[:5])
    print(nnlr.active)
Developer: edgelord, Project: Social-Outcome-Modeling, Lines: 27, Source: preprocess.py

Example 11: main

def main():
    DOC = """
================================================================================
    Compare the prediction accuracy of different models on the boston dataset
================================================================================
    """
    print(DOC)
    from sklearn import cross_validation, datasets
    boston = datasets.load_boston()
    X, y = boston.data, np.round(boston.target)
    #X -= X.mean()
    y -= y.min()

    idx = np.argsort(y)
    X = X[idx]
    y = y[idx]
    cv = cross_validation.ShuffleSplit(y.size, n_iter=50, test_size=.1, random_state=0)
    score_logistic = []
    score_ordinal_logistic = []
    score_ridge = []
    for i, (train, test) in enumerate(cv):
        #test = train
        if not np.all(np.unique(y[train]) == np.unique(y)):
            # we need the train set to have all different classes
            continue
        assert np.all(np.unique(y[train]) == np.unique(y))
        train = np.sort(train)
        test = np.sort(test)
        w, theta = ordinal_logistic_fit(X[train], y[train], verbose=True,
                                        solver='TNC')
        pred = ordinal_logistic_predict(w, theta, X[test])
        s = metrics.mean_absolute_error(y[test], pred)
        print('ERROR (ORDINAL)  fold %s: %s' % (i+1, s))
        score_ordinal_logistic.append(s)

        from sklearn import linear_model
        clf = linear_model.LogisticRegression(C=1.)
        clf.fit(X[train], y[train])
        pred = clf.predict(X[test])
        s = metrics.mean_absolute_error(y[test], pred)
        print('ERROR (LOGISTIC) fold %s: %s' % (i+1, s))
        score_logistic.append(s)

        from sklearn import linear_model
        clf = linear_model.Ridge(alpha=1.)
        clf.fit(X[train], y[train])
        pred = np.round(clf.predict(X[test]))
        s = metrics.mean_absolute_error(y[test], pred)
        print('ERROR (RIDGE)    fold %s: %s' % (i+1, s))
        score_ridge.append(s)


    print()
    print('MEAN ABSOLUTE ERROR (ORDINAL LOGISTIC):    %s' % np.mean(score_ordinal_logistic))
    print('MEAN ABSOLUTE ERROR (LOGISTIC REGRESSION): %s' % np.mean(score_logistic))
    print('MEAN ABSOLUTE ERROR (RIDGE REGRESSION):    %s' % np.mean(score_ridge))
    # print('Chance level is at %s' % (1. / np.unique(y).size))
    
    return np.mean(score_ridge)
Developer: fluxium, Project: statsintro, Lines: 59, Source: ologit.py

Example 12: testModel

def testModel(
    model, layerSizes, Xtrain, Ytrain, Xtest, Ytest, learningRate, epochs,
    batchSize, optimizer, resultsFile = "lossOptLog.txt", printResults = False,
    elapsedTime = False):
    lossCategories = Ytrain.shape[1]
    numberHiddenLayers = len(layerSizes) - 2
    inputLayerSize = layerSizes[0]
    units1 = layerSizes[1]
    dropout1 = dropouts[0]
    dropout2 = dropouts[1]
    dropout3 = dropouts[2]
    # Test MAE of model on training data (to check for overfitting).
    trainingPredY = model.predict_proba(Xtrain, verbose = 0)
    MAETrain = mean_absolute_error(Ytrain, trainingPredY)

    # Test MAE on test data.
    testPredY = model.predict(Xtest, verbose = 0)
    MAE = mean_absolute_error(Ytest, testPredY)

    # Calculate AUC for each category.
    auc = [0] * lossCategories
    """
    for i in range(0, lossCategories):
        categoryValues = Ytest[:][i:(i+1)]
        categoryPredictions = testPredY[:][i:(i+1)]
        auc[i] = roc_auc_score(categoryPredictions, categoryValues)
    aucAverage = (sum(auc) / len(auc))
    """
    aucAverage = 0
    # Evaluate the model and write results to a file.
    scores = model.evaluate(Xtest, Ytest, verbose = 0)
    testAccuracy = scores[1]
    scores = model.evaluate(Xtrain, Ytrain, verbose = 0)
    trainAccuracy = scores[1]
    if(printResults):
        print("Training MAE: %.2f%%" % (MAETrain * 100))
        print("acc: %.2f%%" % (testAccuracy*100))
        print("auc: %.2f%%" % (aucAverage*100))
        print("MAE: %.2f%%" % (MAE*100))
        print("%s , %s , %s, %s, %s , %s , %s , %s , %s , %s, %s \n"
            % (units1, units2, units3, learningRate, epochs, batchSize,
            patience, optimizer, dropout1, dropout2, dropout3))
        print("\n")        
    # Write model results to a file.
    if(elapsedTime is not False):
        with open(resultsFile, "a") as text_file:
            text_file.write(
                "%s , %s , %s, %s , %s , %s , %s , %s , %s , %s, %s , %s , %s , %s , %s , %s, %s \n"
                % (elapsedTime, MAETrain, trainAccuracy, testAccuracy, aucAverage, MAE, units1,
                units2, units3, learningRate, epochs, batchSize, patience,
                optimizer, dropout1, dropout2, dropout3))
    else:
        with open(resultsFile, "a") as text_file:
            text_file.write(
                "%s , %s , %s , %s , %s , %s ,%s , %s, %s, %s , %s , %s , %s , %s , %s, %s \n"
                % (MAETrain, trainAccuracy, testAccuracy, aucAverage, MAE, units1, units2, units3,
                learningRate, epochs, batchSize, optimizer, dropout1, dropout2,
                dropout3))
Developer: hughheadley, Project: pookeyPython, Lines: 58, Source: lossHyperparams.py

Example 13: blended_scorer

def blended_scorer(estimator, X, y):
    ols_preds = ols_preds_for_Xs(X)
    pred_y = estimator.predict(X)
    msg("BLENDED SCORES FOR a CV GROUP:")
    for blend in np.arange(0, 1.01, 0.1):
        blended_prediction = (blend * ols_preds) + ((1.0 - blend) * pred_y)
        blended_score = mean_absolute_error(blended_prediction, y)
        msg("%f * OLS yields score of %f" % (blend, blended_score))
    return mean_absolute_error(y, pred_y)
Developer: dsjoerg, Project: blundercheck, Lines: 9, Source: fit_rfr_pgmodel.py

Example 14: main

def main():
    # load the series
    dtst = Datasets()
    serie = dtst.Leitura_dados(dtst.bases_linear_graduais(3, 35))
    serie = np.asarray(serie)
    particao = Particionar_series(serie, [0.0, 0.0, 0.0], 0)
    serie = particao.Normalizar(serie)

    '''
    ELM = ELMRegressor()
    ELM.Tratamento_dados(serie, [0.8, 0.2, 0.2], 4)

    # build a list holding the data splits
    lista_dados = []
    lista_dados.append(ELM.train_entradas)
    lista_dados.append(ELM.train_saidas)
    lista_dados.append(ELM.val_entradas)
    lista_dados.append(ELM.val_saidas)
    lista_dados.append(ELM.teste_entradas)
    lista_dados.append(ELM.teste_saidas)

    # optimize the ELM architecture
    ELM.Otimizar_rede(10, lista_dados)
    '''

    # train the ELM on the input/output pairs
    #ELM = ELMRegressor(ELM.neuronios_escondidos)
    ELM = ELMRegressor(5)
    ELM.Tratamento_dados(serie, [0.8, 0.2, 0.2], 4)
    ELM.Treinar(ELM.train_entradas, ELM.train_saidas)

    # ELM predictions for the training set
    prediction_train = ELM.Predizer(ELM.train_entradas)
    MAE_train = mean_absolute_error(ELM.train_saidas, prediction_train)
    print('Training MAE: ', MAE_train)

    # ELM predictions for the test set
    prediction_test = ELM.Predizer(ELM.teste_entradas)
    MAE_test = mean_absolute_error(ELM.teste_saidas, prediction_test)
    print('Test MAE: ', MAE_test)

    # plot of training-set predictions
    plt.plot(ELM.train_saidas, label='Training (actual)', color='Blue')
    plt.plot(prediction_train, label='Training (predicted)', color='Red')
    plt.title('Training set, MAE: %s' % MAE_train)
    plt.legend()
    plt.tight_layout()
    plt.show()

    # plot of test-set predictions
    plt.plot(ELM.teste_saidas, label='Test (actual)', color='Blue')
    plt.plot(prediction_test, label='Test (predicted)', color='Red')
    plt.title('Test set, MAE: %s' % MAE_test)
    plt.legend()
    plt.tight_layout()
    plt.show()
Developer: GustavoHFMO, Project: Framework_drift, Lines: 57, Source: ELM.py

Example 15: build_SGDRegressor

def build_SGDRegressor(train_X, train_y, test_X, test_y):
    ##########
    log_train_y = np.log(train_y)
    ##########
    sgd_regressor = linear_model.SGDRegressor(loss='huber', penalty='l1', alpha=0.001, l1_ratio=0.15, verbose=True, n_iter = 50)
    sgd_regressor.fit(train_X, log_train_y)
    train_yhat = np.exp(sgd_regressor.predict(train_X))
    test_yhat = np.exp(sgd_regressor.predict(test_X))
    print(metrics.mean_absolute_error(train_y, train_yhat))
    print(metrics.mean_absolute_error(test_y, test_yhat))
Developer: alyssonbispo, Project: fun_with_kaggle, Lines: 10, Source: build_classifiers.py


Note: the sklearn.metrics.mean_absolute_error examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and redistribution or use should follow each project's license. Please do not republish without permission.