本文整理汇总了Python中seqeval.metrics.classification_report方法的典型用法代码示例。如果您正苦于以下问题:Python metrics.classification_report方法的具体用法?Python metrics.classification_report怎么用?Python metrics.classification_report使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类seqeval.metrics
的用法示例。
在下文中一共展示了metrics.classification_report方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: on_epoch_end
# Required import: from seqeval import metrics
# (alias: from seqeval.metrics import classification_report)
def on_epoch_end(self, epoch, logs=None):
    """Keras callback hook: score the model on ``self.seq`` after each epoch.

    Decodes predictions batch by batch, prints accuracy, F1 and a per-label
    classification report, and stores both scores in ``logs`` so other
    callbacks (e.g. early stopping) can read them.

    Args:
        epoch: Zero-based index of the finished epoch (unused here).
        logs: Metrics dict shared between Keras callbacks. Fix: the original
            ``logs={}`` was a mutable default argument shared across calls;
            a fresh dict is now created when the caller passes nothing.
    """
    if logs is None:
        logs = {}
    label_true, label_pred = [], []
    for i in range(len(self.seq)):
        x_true, y_true = self.seq[i]
        lengths = self.get_lengths(y_true)
        y_pred = self.model.predict_on_batch(x_true)
        # Map id/one-hot sequences back to label strings, trimmed to length.
        y_true = self.t.inverse_transform(y_true, lengths)
        y_pred = self.t.inverse_transform(y_pred, lengths)
        label_true.extend(y_true)
        label_pred.extend(y_pred)
    acc = accuracy_score(label_true, label_pred)
    f1 = f1_seq_score(label_true, label_pred)
    print(' - acc: {:04.2f}'.format(acc * 100))
    print(' - f1: {:04.2f}'.format(f1 * 100))
    print(sequence_report(label_true, label_pred))
    logs['f1_seq'] = np.float64(f1)
    logs['seq_acc'] = np.float64(acc)
示例2: on_epoch_end
# Required import: from seqeval import metrics
# (alias: from seqeval.metrics import classification_report)
def on_epoch_end(self, epoch, logs=None):
    """Keras callback hook: report entity-level F1 on ``self.seq``.

    Args:
        epoch: Zero-based index of the finished epoch (unused here).
        logs: Metrics dict shared between Keras callbacks. Fix: the original
            ``logs={}`` was a mutable default argument shared across calls;
            a fresh dict is now created when the caller passes nothing.
    """
    if logs is None:
        logs = {}
    label_true = []
    label_pred = []
    for i in range(len(self.seq)):
        x_true, y_true = self.seq[i]
        lengths = self.get_lengths(y_true)
        y_pred = self.model.predict_on_batch(x_true)
        # Map id/one-hot sequences back to label strings, trimmed to length.
        y_true = self.p.inverse_transform(y_true, lengths)
        y_pred = self.p.inverse_transform(y_pred, lengths)
        label_true.extend(y_true)
        label_pred.extend(y_pred)
    score = f1_score(label_true, label_pred)
    print(' - f1: {:04.2f}'.format(score * 100))
    print(classification_report(label_true, label_pred))
    logs['f1'] = score
示例3: compute_seqacc
# Required import: from seqeval import metrics
# (alias: from seqeval.metrics import classification_report)
def compute_seqacc(predicts, labels, label_mapper):
    """Build a seqeval classification report from id-level tag sequences.

    Each (predict, label) pair is trimmed before scoring: position 0 is
    skipped, positions whose gold tag maps to "X" (presumably wordpiece
    continuations — verify against the label vocabulary) are dropped, and
    the final kept position is popped.

    Args:
        predicts: Iterable of predicted tag-id sequences.
        labels: Iterable of gold tag-id sequences, aligned with ``predicts``.
        label_mapper: Mapping from tag id to tag string.

    Returns:
        str: classification report with 4-digit precision.
    """
    y_true, y_pred = [], []

    def trim(predict, label):
        temp_1 = []
        temp_2 = []
        for j, m in enumerate(predict):
            if j == 0:
                continue  # skip the first position entirely
            if label_mapper[label[j]] != "X":
                temp_1.append(label_mapper[label[j]])
                temp_2.append(label_mapper[m])
        # Fix: the unconditional pop() raised IndexError when a sequence
        # trimmed down to nothing; guard and append an empty sequence instead.
        if temp_1:
            temp_1.pop()
        if temp_2:
            temp_2.pop()
        y_true.append(temp_1)
        y_pred.append(temp_2)

    for predict, label in zip(predicts, labels):
        trim(predict, label)
    report = classification_report(y_true, y_pred, digits=4)
    return report
示例4: benchmark_flair_mdl
# Required import: from seqeval import metrics
# (alias: from seqeval.metrics import classification_report)
def benchmark_flair_mdl():
    """Time the flair NER model over the benchmark sentences and print a
    seqeval classification report (MISC entities removed)."""
    tagger = load_flair_ner_model()
    start = time.time()
    # Wrap each tokenized sentence in a flair Sentence object.
    flair_sents = []
    for tokens in sentences_tokens:
        sent = Sentence()
        for word in tokens:
            sent.add_token(Token(word))
        flair_sents.append(sent)
    tagger.predict(flair_sents, verbose=True)
    # Collect the predicted NER tag of every token, sentence by sentence.
    predictions = []
    for sent in flair_sents:
        predictions.append([tok.tags['ner'].value for tok in sent])
    print("Made predictions on {} sentences and {} tokens in {}s".format(num_sentences, num_tokens, time.time() - start))
    assert len(predictions) == num_sentences
    print(classification_report(sentences_entities, remove_miscs(predictions), digits=4))
示例5: benchmark_flair_mdl
# Required import: from seqeval import metrics
# (alias: from seqeval.metrics import classification_report)
def benchmark_flair_mdl():
    """Time the flair POS tagger over the flair-formatted test corpus and
    print a seqeval classification report against the gold tags."""
    tagger = load_flair_pos_model()
    start = time.time()
    tagger.predict(corpus_flair.test)
    # Read the predicted 'upos' tag off every token of every test sentence.
    tags_pred = []
    for sent in corpus_flair.test:
        tags_pred.append([tok.tags['upos'].value for tok in sent])
    print('**Flair model** ')
    print("Made predictions on {} sentences and {} tokens in {}s".format(
        num_sentences, num_tokens, time.time() - start))
    assert len(tags_pred) == num_sentences
    assert sum(len(s) for s in tags_pred) == num_tokens
    print(classification_report(tags_true, tags_pred,
                                digits=4))
示例6: compute_seqacc
# Required import: from seqeval import metrics
# (alias: from seqeval.metrics import classification_report)
def compute_seqacc(predicts, labels, label_mapper):
    """Return a seqeval classification report for predicted vs. gold tags.

    Each sequence is trimmed first: position 0 is skipped, positions whose
    gold tag maps to 'X' are dropped, and the last kept position is removed.
    """
    y_true, y_pred = [], []

    def trim(predict, label):
        gold, guess = [], []
        for idx, tag_id in enumerate(predict):
            if idx == 0:
                continue  # skip the first position entirely
            mapped_gold = label_mapper[label[idx]]
            if mapped_gold != 'X':
                gold.append(mapped_gold)
                guess.append(label_mapper[tag_id])
        # Drop the trailing position from both sequences.
        gold.pop()
        guess.pop()
        y_true.append(gold)
        y_pred.append(guess)

    for predict, label in zip(predicts, labels):
        trim(predict, label)
    return classification_report(y_true, y_pred, digits=4)
示例7: score
# Required import: from seqeval import metrics
# (alias: from seqeval.metrics import classification_report)
def score(self, y_true, y_pred):
    """Calculate, print and return the F1 score for the given sequences.

    Args:
        y_true (list): true sequences.
        y_pred (list): predicted sequences.

    Returns:
        score: f1 score.
    """
    result = f1_score(y_true, y_pred)
    print(' - f1: {:04.2f}'.format(result * 100))
    # Only print the full per-label report when a digit count is configured.
    if self.digits:
        print(classification_report(y_true, y_pred, digits=self.digits))
    return result
示例8: main
# Required import: from seqeval import metrics
# (alias: from seqeval.metrics import classification_report)
def main():
    """Evaluate a trained KWDLC NER tagger on the held-out test file and
    print accuracy, F1 and the full classification report."""
    # Load the test set.
    test_X, test_Y = nagisa.utils.load_file("kwdlc.test")
    # Build the tagger for kwdlc.
    ner_tagger = nagisa.Tagger(vocabs='kwdlc_ner_model.vocabs',
                               params='kwdlc_ner_model.params',
                               hp='kwdlc_ner_model.hp')
    # Decode every sentence and collect aligned gold/predicted tag lists.
    true_Y = []
    pred_Y = []
    for words, true_y in zip(test_X, test_Y):
        pred_y = ner_tagger.decode(words)
        sent_pred, sent_true = [], []
        # zip truncates to the shortest of words / predictions / gold tags.
        for word, pred, true in zip(words, pred_y, true_y):
            sent_pred.append(pred)
            sent_true.append(true)
        true_Y.append(sent_true)
        pred_Y.append(sent_pred)
    # Evaluate.
    accuracy = accuracy_score(true_Y, pred_Y)
    print("accuracy: {}".format(accuracy))
    f1 = f1_score(true_Y, pred_Y)
    print("macro-f1: {}".format(f1))
    report = classification_report(true_Y, pred_Y)
    print(report)
示例9: show_report
# Required import: from seqeval import metrics
# (alias: from seqeval.metrics import classification_report)
def show_report(labels, preds):
    """Return a seqeval classification report; ``suffix=True`` tells seqeval
    the tag type comes after the chunk marker (e.g. PER-B rather than B-PER)."""
    report = classification_report(labels, preds, suffix=True)
    return report
示例10: evaluate
# Required import: from seqeval import metrics
# (alias: from seqeval.metrics import classification_report)
def evaluate(self, x: Dict[str, List[List[str]]], y: List[List[str]],
             batch_size=64):
    """Predict labels for ``x`` and print a sequence-labeling report against
    the gold sequences ``y``.

    The last element of each ``x['token']`` row holds the sequence length, so
    a copy is made before that element is stripped from the features.
    """
    features = deepcopy(x)
    true_lengths = [row[-1] for row in features['token']]
    features['token'] = [row[:-1] for row in features['token']]
    batches = BasicIterator('sequence_labeling', self.transformer,
                            features, batch_size=batch_size)
    raw_pred = self.model.model.predict_generator(batches)
    y_pred = self.transformer.inverse_transform(raw_pred, lengths=true_lengths)
    print(sequence_report(y, y_pred))
示例11: call
# Required import: from seqeval import metrics
# (alias: from seqeval.metrics import classification_report)
def call(self, y_true=None, y_pred=None, arguments=None):
    """Compute the metric: a seqeval classification report over label
    sequences reconstructed from id tensors via the label vocabulary."""
    vocab_file = arguments["label_vocab_path"]
    true_sents = ids_to_sentences(y_true, vocab_file)
    pred_sents = ids_to_sentences(y_pred, vocab_file)
    # Leading newline keeps the report aligned when logged after a prefix.
    return "\n" + seq_classification_report(true_sents, pred_sents, digits=4)
示例12: evaluate
# Required import: from seqeval import metrics
# (alias: from seqeval.metrics import classification_report)
def evaluate(self, data: List[List[str]], labels: List[List[str]]) -> float:
    """Evaluate the performance of ner model with given data and labels, and
    return the f1 score.

    Args:
        data: List of List of str. List of tokenized (in char level) texts,
            like ``[['我', '在', '上', '海', '上', '学'], ...]``.
        labels: List of List of str. The corresponding labels, usually in BIO
            or BIOES format, like ``[['O', 'O', 'B-LOC', 'I-LOC', 'O', 'O'], ...]``.

    Returns:
        Float. The F1 score.
    """
    features, y = self.preprocessor.prepare_input(data, labels)
    pred_probs = self.model.predict(features)
    if self.preprocessor.use_bert:
        # Drop the probability rows for the <CLS> and <SEQ> positions.
        pred_probs = pred_probs[:, 1:-1, :]
    # Decode no more tags than either the gold sequence or the model output has.
    lengths = [min(len(gold), probs.shape[0])
               for gold, probs in zip(labels, pred_probs)]
    y_pred = self.preprocessor.label_decode(pred_probs, lengths)
    recall = metrics.recall_score(labels, y_pred)
    precision = metrics.precision_score(labels, y_pred)
    f1 = metrics.f1_score(labels, y_pred)
    logging.info('Recall: {}, Precision: {}, F1: {}'.format(recall, precision, f1))
    logging.info(metrics.classification_report(labels, y_pred))
    return f1
示例13: on_epoch_end
# Required import: from seqeval import metrics
# (alias: from seqeval.metrics import classification_report)
def on_epoch_end(self, epoch, logs=None):
    """Keras callback hook: compute validation recall/precision/F1, store
    them in ``logs`` and print the full classification report.

    Args:
        epoch: Zero-based index of the finished epoch.
        logs: Metrics dict supplied by Keras. Fix: the body subscripts
            ``logs[...]`` unconditionally, so the declared default of None
            crashed on a direct call; a fresh dict is created in that case.
    """
    if logs is None:
        logs = {}
    pred_probs = self.model.predict(self.valid_features)
    if self.preprocessor.use_bert:
        pred_probs = pred_probs[:, 1:-1, :]  # remove <CLS> and <SEQ>
    y_pred = self.preprocessor.label_decode(pred_probs, self.get_lengths(pred_probs))
    r = metrics.recall_score(self.valid_labels, y_pred)
    p = metrics.precision_score(self.valid_labels, y_pred)
    f1 = metrics.f1_score(self.valid_labels, y_pred)
    logs['val_r'] = r
    logs['val_p'] = p
    logs['val_f1'] = f1
    print('Epoch {}: val_r: {}, val_p: {}, val_f1: {}'.format(epoch+1, r, p, f1))
    print(metrics.classification_report(self.valid_labels, y_pred))
示例14: benchmark_polyglot_mdl
# Required import: from seqeval import metrics
# (alias: from seqeval.metrics import classification_report)
def benchmark_polyglot_mdl():
    """Time polyglot's Danish NE chunker over the benchmark sentences and
    print a seqeval classification report (MISC entities removed).

    Running polyglot requires these packages:
    # Morfessor==2.0.6
    # PyICU==2.4.2
    # pycld2==0.41
    # polyglot
    """
    from polyglot.tag import NEChunker
    from polyglot.text import WordList
    start = time.time()
    # Fix: the chunker is loop-invariant, so constructing it once instead of
    # once per sentence avoids repeated model setup inside the timed loop.
    ne_chunker = NEChunker(lang='da')
    predictions = []
    for tokens in sentences_tokens:
        word_list = WordList(tokens, language='da')
        word_ent_tuples = list(ne_chunker.annotate(word_list))
        predictions.append([entity for word, entity in word_ent_tuples])
    print("Made predictions on {} sentences and {} tokens in {}s".format(
        num_sentences, num_tokens, time.time() - start))
    assert len(predictions) == len(sentences_entities)
    print(classification_report(sentences_entities, remove_miscs(predictions),
                                digits=4))
示例15: benchmark_spacy_mdl
# Required import: from seqeval import metrics
# (alias: from seqeval.metrics import classification_report)
def benchmark_spacy_mdl():
    """Time the spaCy NER pipeline over the benchmark sentences and print a
    seqeval classification report (MISC entities removed)."""
    nlp = load_spacy_model()
    ner = nlp.entity
    predictions = []
    start = time.time()
    for token in sentences_tokens:
        doc = nlp.tokenizer.tokens_from_list(token)
        ner(doc)
        # Reassemble IOB-style tags: plain 'O', otherwise 'B-TYPE' / 'I-TYPE'.
        ents = [t.ent_iob_ if t.ent_iob_ == 'O'
                else t.ent_iob_ + "-" + t.ent_type_
                for t in doc]
        predictions.append(ents)
    print("Made predictions on {} sentences and {} tokens in {}s".format(
        num_sentences, num_tokens, time.time() - start)
    )
    assert len(predictions) == num_sentences
    print(classification_report(sentences_entities, remove_miscs(predictions),
                                digits=4))