

Python FreqDist.tabulate Method Code Examples

This article collects typical usage examples of the Python method nltk.probability.FreqDist.tabulate. If you are wondering how to call FreqDist.tabulate, what it does, or what real-world usage looks like, the curated code examples below should help. You can also explore further usage examples of the containing class, nltk.probability.FreqDist.


Four code examples of the FreqDist.tabulate method are shown below, ordered by popularity by default.
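Before the full examples, a minimal sketch of what FreqDist.tabulate does may help: given an iterable of samples, FreqDist counts them, and tabulate() prints a plain-text table of samples and their counts, most frequent first. The sentence below is made up purely for illustration.

from nltk.probability import FreqDist

# A made-up sentence, for illustration only.
words = "the quick brown fox jumps over the lazy dog the fox".split()

fdist = FreqDist(words)

# Print a text table of the 5 most frequent words and their counts.
fdist.tabulate(5)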

Example 1: print_results

# Required import: from nltk.probability import FreqDist [as alias]
# Or: from nltk.probability.FreqDist import tabulate [as alias]
    def print_results(self):
        print()
        print("int-binned log-likelihood distributions:")
        ll_fdist = FreqDist(self.res["dlist_dist"])
        ll_fdist.tabulate()
        print()
        print(self.res["cm"])

        print("{:<30}{:>.3%}"
                .format("Majority Class Prior Prob: ",
                   self.res["prior_probability"]))
        print("{:<30}{:>}"
                .format("Majority Class Label: ", self.majority_label))

        print()
        print("{:<30}{:>.3%}"
                .format("Accuracy: ", self.res["accuracy"]))
        print("{:<30}{:>.3%}"
                .format("Error: ", self.res["error"]))
        print("{:<30}{:>.3%}"
                .format("Error Reduction / Baseline: ",
                    self.res["error_reduction"]))

        print()
        print("{:<7}{:<23}{:>.3%}"
                .format(self.root_star,
                    "Precision: ",
                    self.res["root_star_precision"]))
        print("{:<7}{:<23}{:>.3%}"
                .format(self.root,
                    "Precision: ",
                    self.res["root_precision"]))
        print("{:<7}{:<23}{:>.3%}"
                .format(self.root_star,
                    "Recall: ",
                    self.res["root_star_recall"]))
        print("{:<7}{:<23}{:>.3%}"
                .format(self.root,
                    "Recall: ",
                    self.res["root_recall"]))

        print()
        print("{:<30}{:>.3%}"
                .format("Macro Precision: ", self.res["macro_precision"]))
        print("{:<30}{:>.3%}"
                .format("Macro Recall: ", self.res["macro_recall"]))
        print()
        print("Top Ten Rules:")
        for l in self.decision_list[:10]:
            print("{:<30}{:>.4}".format(l[0], l[1]))
        print()
        print("3 Correct:")
        for l in self.res["correct"][:3]:
            print("Correctly Predicted: {} \n Rule: {}, log-likelihood: {} \n {}"
                    .format(l[0], l[2][0], l[2][1], " ".join(l[3])))
        print()
        print("3 Incorrect:")
        for l in self.res["incorrect"][:3]:
            print("Predicted: {}, was actually: {} \n Rule: {}, log-likelihood: {} \n {}"
                    .format(l[0], l[1], l[2][0], l[2][1], " ".join(l[3])))
Developer: dropofwill, Project: word-sense-disambiguation, Lines: 62, Source file: decision_list_clf.py

Example 2: testFunc

# Required import: from nltk.probability import FreqDist [as alias]
# Or: from nltk.probability.FreqDist import tabulate [as alias]
def testFunc():
    # Read the raw text; getWordList is defined elsewhere in the original project.
    f = open("./MZI/data.doc", "r", encoding="utf8")
    text = f.read()
    f.close()
    tokens = getWordList(text)
    print(len(set(tokens)))

    from nltk.probability import FreqDist
    from nltk.util import bigrams

    # Frequency distribution over tokens longer than one character.
    fdist = FreqDist(w for w in tokens if len(w) > 1)
    fdist.tabulate(50)

    # Bigrams over the same filtered tokens.
    big = list(bigrams(w for w in tokens if len(w) > 1))
    print(big[:100])

    fdist = FreqDist(str(w) for w in big)
    fdist.tabulate(10)
    fdist.plot(50)
Developer: olee12, Project: Stylogenetics, Lines: 16, Source file: MakeNormalData.py

Example 3: len

# Required import: from nltk.probability import FreqDist [as alias]
# Or: from nltk.probability.FreqDist import tabulate [as alias]
        # tagger, tokens2, documents, pos, neg, i, f1, pickle, and FreqDist
        # are all defined or imported earlier in the original script.
        tokens = tagger.tag(tokens2)

        if len(tokens) == 0:
            print("empty text found after preprocessing...discard")
            continue

        # Keep at most 500 positive (rating 5) and 500 negative (rating < 3) documents.
        if i[1] == 5 and pos < 500:
            documents.append((tokens, "good location"))
            pos += 1

        if i[1] < 3 and neg < 500:
            documents.append((tokens, "bad location"))
            neg += 1

labels = []
for w in documents:
    labels.append(w[1])
print(len(documents))

# Tabulate how many documents carry each class label.
label_dist = FreqDist(labels)
label_dist.tabulate()
for label in label_dist:
    print(label, label_dist[label])

f1.close()
fout = open("../classification/location/location.dat", "wb")
pickle.dump(documents, fout, protocol=0)
fout.close()
print("Finish\n")
Developer: gunbuster363, Project: DataMining_university, Lines: 31, Source file: class_location.py

Example 4: FreqDist

# Required import: from nltk.probability import FreqDist [as alias]
# Or: from nltk.probability.FreqDist import tabulate [as alias]
import fileinput
import json
import string
import re

import nltk
from nltk.probability import FreqDist
from nltk.tokenize import word_tokenize

# Parse JSON from stdin; the input is expected to be a JSON array of publication records.
for line in fileinput.input(files='-'):
    data = json.loads(line)

# Separate word-frequency distributions for tool vs. non-tool abstracts.
toolfdist = FreqDist()
nontoolfdist = FreqDist()

stopwords = nltk.corpus.stopwords.words('english')

for i in range(len(data)):
    text = word_tokenize(data[i]['abstract'])
    if data[i]['is_tool']:
        for word in text:
            word = word.lower()
            # Skip stopwords, punctuation, and purely numeric/symbolic tokens.
            if word not in stopwords and word not in string.punctuation and re.fullmatch(r'[0-9\!\"\#\$\%\&\'\(\)\*\+\,\-.\/\:\;\<\=\>\?\@\[\\\]\^\_\`\{\|\}\~≥]*', word) is None:
                toolfdist[word] += 1
    else:
        for word in text:
            word = word.lower()
            if word not in stopwords and word not in string.punctuation and re.fullmatch(r'[0-9\!\"\#\$\%\&\'\(\)\*\+\,\-.\/\:\;\<\=\>\?\@\[\\\]\^\_\`\{\|\}\~≥]*', word) is None:
                nontoolfdist[word] += 1

# Penalize words that are also common in non-tool abstracts.
for word in toolfdist:
    if word in nontoolfdist:
        toolfdist[word] -= 10 * nontoolfdist[word]
        #toolfdist[word] = 0

# Print the 200 most frequent remaining words as a text table.
toolfdist.tabulate(200)
Developer: UCLA-BD2K, Project: Aztec_Pub_Classifier, Lines: 32, Source file: topwords.py


Note: The nltk.probability.FreqDist.tabulate examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers; copyright remains with the original authors, and any distribution or use should follow the corresponding project's license. Do not reproduce without permission.