本文整理汇总了Python中sklearn.metrics.classification.accuracy_score方法的典型用法代码示例。如果您正苦于以下问题:Python classification.accuracy_score方法的具体用法?Python classification.accuracy_score怎么用?Python classification.accuracy_score使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在模块sklearn.metrics.classification的用法示例。
在下文中一共展示了classification.accuracy_score方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: predict
# 需要导入模块: from sklearn.metrics import classification [as 别名]
# 或者: from sklearn.metrics.classification import accuracy_score [as 别名]
def predict(self, model, X, y):
    """Predict per-class probabilities with *model* and report accuracy.

    NOTE(review): assumes ``model.predict_proba_dict`` returns a mapping
    whose values form one probability column per class — confirm with the
    model implementation.

    :param model: fitted model exposing ``predict_proba_dict``
    :param X: feature matrix to score
    :param y: true labels, compared against the argmax prediction
    :return: 2-D array of class probabilities, one row per sample
    """
    proba_by_class = model.predict_proba_dict(X)
    # DataFrame aligns the per-class columns; .values yields the matrix.
    proba_matrix = pd.DataFrame(proba_by_class).values
    # Hard label = index of the most probable class in each row.
    predicted_labels = np.argmax(proba_matrix, axis=1)
    print('Accuracy: ', accuracy_score(y, predicted_labels))
    return proba_matrix
示例2: predict
# 需要导入模块: from sklearn.metrics import classification [as 别名]
# 或者: from sklearn.metrics.classification import accuracy_score [as 别名]
def predict(self, model, X, y, ContinuousColumnName, CategoricalColumnName):
    """Run a TF estimator's probability predictions and report its metrics.

    :param model: TF estimator exposing ``predict_proba`` and ``evaluate``
    :param X: feature data forwarded to ``self.input_fn``
    :param y: true labels used for the accuracy report
    :param ContinuousColumnName: continuous feature column names
    :param CategoricalColumnName: categorical feature column names
    :return: ndarray of per-class probabilities, one row per sample
    """
    # Materialize the estimator's generator of per-sample probability vectors.
    predictions = np.array(list(model.predict_proba(
        input_fn=lambda: self.input_fn(X, y, ContinuousColumnName, CategoricalColumnName))))
    results = model.evaluate(
        input_fn=lambda: self.input_fn(X, y, ContinuousColumnName, CategoricalColumnName),
        steps=1)
    for key in sorted(results):
        print("%s: %s" % (key, results[key]))
    # BUG FIX: the original passed tf.argmax(...), which yields a symbolic
    # Tensor rather than concrete labels; sklearn's accuracy_score needs
    # real integers, so take the argmax with NumPy on the materialized array.
    print('Accuracy: ', accuracy_score(y, np.argmax(predictions, axis=1)))
    return predictions
示例3: predict
# 需要导入模块: from sklearn.metrics import classification [as 别名]
# 或者: from sklearn.metrics.classification import accuracy_score [as 别名]
def predict(self, model, X, y):
    """Return class-probability predictions; print accuracy when labels allow.

    :param model: fitted classifier exposing ``predict_proba``
    :param X: feature matrix to score
    :param y: true labels; accuracy is reported only if all are finite
    :return: 2-D array of class probabilities from ``predict_proba``
    """
    proba = model.predict_proba(X)
    # Accuracy is only meaningful when every label is a finite number
    # (NaN/inf labels would poison the comparison).
    labels_are_usable = np.isfinite(y).all()
    if labels_are_usable:
        print('Accuracy: ', accuracy_score(y, np.argmax(proba, axis=1)))
    return proba
示例4: prediction_score
# 需要导入模块: from sklearn.metrics import classification [as 别名]
# 或者: from sklearn.metrics.classification import accuracy_score [as 别名]
def prediction_score(train_X, train_y, test_X, test_y, metric, model):
    """Fit the named classifier on the train split and score its test predictions.

    :param train_X: training features
    :param train_y: training labels
    :param test_X: test features
    :param test_y: test labels
    :param metric: scoring function name, "f1" or "accuracy"
    :param model: "random_forest_classifier" or "logistic_regression"
    :return: the chosen metric evaluated on the test split
    :raises ValueError: on an unknown metric or model name
    """
    distinct_train_labels = set(train_y)
    if len(distinct_train_labels) == 1:
        # Degenerate case: a single train label — always predict that label
        # (fitting a classifier on one class would be pointless or error out).
        constant_label = next(iter(distinct_train_labels))
        test_pred = np.ones_like(test_y) * constant_label
    else:
        # Build the requested classifier.
        if model == "random_forest_classifier":
            clf = RandomForestClassifier(n_estimators=10)
        elif model == "logistic_regression":
            clf = LogisticRegression()
        else:
            # ValueError is more precise than a bare Exception and is still
            # caught by any existing `except Exception` callers.
            raise ValueError("Invalid model name.")
        clf.fit(train_X, train_y)
        test_pred = clf.predict(test_X)
    # Score the predictions with the requested metric.
    if metric == "f1":
        return f1_score(test_y, test_pred)
    elif metric == "accuracy":
        return accuracy_score(test_y, test_pred)
    else:
        raise ValueError("Invalid metric name.")
示例5: train_and_eval
# 需要导入模块: from sklearn.metrics import classification [as 别名]
# 或者: from sklearn.metrics.classification import accuracy_score [as 别名]
def train_and_eval(ngram_range=(1, 1), max_features=None, max_df=1.0, C=1.0):
    """Train and eval newsgroup classification.

    :param ngram_range: ngram range
    :param max_features: the number of maximum features
    :param max_df: max document frequency ratio
    :param C: Inverse of regularization strength for LogisticRegression
    :return: metrics
    """
    # Fetch the two fixed splits of the 20-newsgroups corpus.
    train_data = fetch_20newsgroups(subset='train')
    test_data = fetch_20newsgroups(subset='test')

    # TF-IDF features feeding a multinomial logistic regression.
    pipeline = Pipeline([
        ('tfidf', TfidfVectorizer()),
        ('clf', LogisticRegression(multi_class='auto')),
    ])
    pipeline.set_params(**{
        'tfidf__ngram_range': ngram_range,
        'tfidf__max_features': max_features,
        'tfidf__max_df': max_df,
        'clf__C': C,
    })
    print(pipeline.get_params().keys())

    pipeline.fit(train_data.data, train_data.target)

    # Time the whole prediction pass and report per-sample latency.
    start_time = time()
    predictions = pipeline.predict(test_data.data)
    inference_time = time() - start_time
    avg_inference_time = 1.0 * inference_time / len(test_data.target)
    print("Avg. inference time: {}".format(avg_inference_time))

    # Weighted averages account for the uneven newsgroup class sizes.
    return {
        'accuracy': accuracy_score(test_data.target, predictions),
        'recall': recall_score(test_data.target, predictions, average='weighted'),
        'f1': f1_score(test_data.target, predictions, average='weighted'),
    }
示例6: train_and_eval
# 需要导入模块: from sklearn.metrics import classification [as 别名]
# 或者: from sklearn.metrics.classification import accuracy_score [as 别名]
def train_and_eval(output, ngram_range=(1, 1), max_features=None, max_df=1.0, C=1.0):
    """Train and eval newsgroup classification.

    :param output: file path where the fitted pipeline is persisted
    :param ngram_range: ngram range
    :param max_features: the number of maximum features
    :param max_df: max document frequency ratio
    :param C: Inverse of regularization strength for LogisticRegression
    :return: metrics
    """
    # Fetch the two fixed splits of the 20-newsgroups corpus.
    train_data = fetch_20newsgroups(subset='train')
    test_data = fetch_20newsgroups(subset='test')

    # TF-IDF features feeding a multinomial logistic regression.
    pipeline = Pipeline([
        ('tfidf', TfidfVectorizer()),
        ('clf', LogisticRegression(multi_class='auto')),
    ])
    pipeline.set_params(**{
        'tfidf__ngram_range': ngram_range,
        'tfidf__max_features': max_features,
        'tfidf__max_df': max_df,
        'clf__C': C,
    })
    print(pipeline.get_params().keys())

    pipeline.fit(train_data.data, train_data.target)

    # Time the whole prediction pass and report per-sample latency.
    start_time = time()
    predictions = pipeline.predict(test_data.data)
    inference_time = time() - start_time
    avg_inference_time = 1.0 * inference_time / len(test_data.target)
    print("Avg. inference time: {}".format(avg_inference_time))

    # Weighted averages account for the uneven newsgroup class sizes.
    metrics = {
        'accuracy': accuracy_score(test_data.target, predictions),
        'recall': recall_score(test_data.target, predictions, average='weighted'),
        'f1': f1_score(test_data.target, predictions, average='weighted'),
    }

    # Persist the fitted pipeline so it can be reloaded for serving.
    joblib.dump(pipeline, output)
    return metrics