本文整理汇总了Python中Core.IdSet.IdSet.getNames方法的典型用法代码示例。如果您正苦于以下问题:Python IdSet.getNames方法的具体用法?Python IdSet.getNames怎么用?Python IdSet.getNames使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类Core.IdSet.IdSet
的用法示例。
在下文中一共展示了IdSet.getNames方法的2个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test
# 需要导入模块: from Core.IdSet import IdSet [as 别名]
# 或者: from Core.IdSet.IdSet import getNames [as 别名]
def test(cls, examples, modelPath, output=None, parameters=None, forceInternal=False, classIds=None): # , timeout=None):
"""
Classify examples with a pre-trained model.
@type examples: string (filename) or list (or iterator) of examples
@param examples: a list or file containing examples in SVM-format
@type modelPath: string
@param modelPath: filename of the pre-trained model file
@type parameters: a dictionary or string
@param parameters: parameters for the classifier
@type output: string
@param output: the name of the predictions file to be written
@type forceInternal: Boolean
@param forceInternal: Use python classifier even if SVM Multiclass binary is defined in Settings.py
"""
if type(parameters) == types.StringType:
parameters = splitParameters(parameters)
timer = Timer()
if type(examples) == types.ListType:
print >> sys.stderr, "Classifying", len(examples), "with SVM-MultiClass model", modelPath
examples, predictions = self.filterClassificationSet(examples, False)
testPath = self.tempDir+"/test.dat"
Example.writeExamples(examples, testPath)
else:
print >> sys.stderr, "Classifying file", examples, "with SVM-MultiClass model", modelPath
testPath = examples
examples = Example.readExamples(examples,False)
if parameters != None:
parameters = copy.copy(parameters)
if parameters.has_key("c"):
del parameters["c"]
if parameters.has_key("predefined"):
parameters = copy.copy(parameters)
modelPath = os.path.join(parameters["predefined"][0],"classifier/model")
del parameters["predefined"]
# Read model
if modelPath == None:
modelPath = "model-multilabel"
classModels = {}
if modelPath.endswith(".gz"):
f = gzip.open(modelPath, "rt")
else:
f = open(modelPath, "rt")
thresholds = {}
for line in f:
key, value, threshold = line.split()
classModels[key] = value
if threshold != "None":
thresholds[key] = float(threshold)
else:
thresholds[key] = 0.0
f.close()
mergedPredictions = []
if type(classIds) == types.StringType:
classIds = IdSet(filename=classIds)
#print classModels
print "Thresholds", thresholds
classifierBin = Settings.SVMMultiClassDir+"/svm_multiclass_classify"
print parameters
if "classifier" in parameters and "svmperf" in parameters["classifier"]:
classifierBin = Settings.SVMPerfDir+"/svm_perf_classify"
parameters = copy.copy(parameters)
del parameters["classifier"]
for className in classIds.getNames():
if className != "neg" and not "---" in className:
classId = classIds.getId(className)
if thresholds[str(className)] != 0.0:
print >> sys.stderr, "Classifying", className, "with threshold", thresholds[str(className)]
else:
print >> sys.stderr, "Classifying", className
args = [classifierBin]
#self.__addParametersToSubprocessCall(args, parameters)
classOutput = "predictions" + ".cls-" + className
logFile = open("svmmulticlass" + ".cls-" + className + ".log","at")
args += [testPath, classModels[str(className)], classOutput]
print args
subprocess.call(args, stdout = logFile, stderr = logFile)
cls.addPredictions(classOutput, mergedPredictions, classId, len(classIds.Ids), threshold=thresholds[str(className)])
print >> sys.stderr, timer.toString()
predFileName = output
f = open(predFileName, "wt")
for mergedPred in mergedPredictions:
if len(mergedPred[0]) > 1 and "1" in mergedPred[0]:
mergedPred[0].remove("1")
mergedPred[1] = str(mergedPred[1])
mergedPred[0] = ",".join(sorted(list(mergedPred[0])))
f.write(" ".join(mergedPred) + "\n")
f.close()
return mergedPredictions
示例2: ids
# 需要导入模块: from Core.IdSet import IdSet [as 别名]
# 或者: from Core.IdSet.IdSet import getNames [as 别名]
#.........这里部分代码省略.........
self.elementCounts = None
self.progress = ProgressCounter(None, "Build examples")
else:
self.elementCounts = None
self.progress = ProgressCounter(None, "Build examples")
self.calculatePredictedRange(self.getSentences(input, self.parse, self.tokenization))
removeIntersentenceInteractions = True
if "keep_intersentence" in self.styles and self.styles["keep_intersentence"]:
print >> sys.stderr, "Keeping intersentence interactions for input corpus"
removeIntersentenceInteractions = False
inputIterator = getCorpusIterator(input, None, self.parse, self.tokenization, removeIntersentenceInteractions=removeIntersentenceInteractions)
#goldIterator = []
if gold != None:
removeGoldIntersentenceInteractions = True
if "keep_intersentence_gold" in self.styles and self.styles["keep_intersentence_gold"]:
print >> sys.stderr, "Keeping intersentence interactions for gold corpus"
removeGoldIntersentenceInteractions = False
goldIterator = getCorpusIterator(gold, None, self.parse, self.tokenization, removeIntersentenceInteractions=removeGoldIntersentenceInteractions)
for inputSentences, goldSentences in itertools.izip_longest(inputIterator, goldIterator, fillvalue=None):
assert inputSentences != None
assert goldSentences != None
self.processDocument(inputSentences, goldSentences, outfile, structureAnalyzer=structureAnalyzer)
else:
for inputSentences in inputIterator:
self.processDocument(inputSentences, None, outfile, structureAnalyzer=structureAnalyzer)
outfile.close()
self.progress.endUpdate()
# Show statistics
print >> sys.stderr, "Examples built:", self.exampleCount
print >> sys.stderr, "Features:", len(self.featureSet.getNames())
print >> sys.stderr, "Style:", Utils.Parameters.toString(self.getParameters(self.styles))
if self.exampleStats.getExampleCount() > 0:
self.exampleStats.printStats()
# Save Ids
if allowNewIds:
self.saveIds()
def processDocument(self, sentences, goldSentences, outfile, structureAnalyzer=None):
    """
    Build examples for every sentence of one document.

    @param sentences: list of sentence wrapper objects (each exposing a
        "sentence" element with a "get" accessor for its attributes)
    @param goldSentences: parallel list of gold-annotated sentences, or None
        when no gold corpus is used; assumed to align index-by-index with
        "sentences" -- TODO confirm against the corpus iterator
    @param outfile: open output stream passed through to processSentence
    @param structureAnalyzer: optional analyzer passed through to processSentence
    """
    #calculatePredictedRange(self, sentences)
    for index in range(len(sentences)):
        sentence = sentences[index]
        # Pair each input sentence with its gold counterpart when available
        goldSentence = None
        if goldSentences != None:
            goldSentence = goldSentences[index]
        self.progress.update(1, "Building examples ("+sentence.sentence.get("id")+"): ")
        self.processSentence(sentence, outfile, goldSentence, structureAnalyzer=structureAnalyzer)
def processSentence(self, sentence, outfile, goldSentence=None, structureAnalyzer=None):
# Process filtering rules
if self.styles["sentenceLimit"]: # Rules for limiting which sentences to process
# Get the rule list
limitRules = self.styles["sentenceLimit"]
if type(limitRules) in types.StringTypes:
limitRules = [limitRules]
# Get the list of sentence element attribute names
sentenceElement = sentence.sentence
sentenceAttributes = sorted(sentenceElement.attrib.keys())
# Filter sentences based on matching rules to their attribute values
for rule in limitRules:
for sentAttr in sentenceAttributes:
# Rule are of the form "attr.value" where "attr" is the name