This article collects typical usage examples of the Python method library.file_io.FileIO.iterateLinesFromFile. If you have been wondering what exactly FileIO.iterateLinesFromFile does, how to call it, or what real uses of it look like, the curated examples below may help. You can also look further into usage examples of its containing class, library.file_io.FileIO.
Four code examples of FileIO.iterateLinesFromFile are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help surface better Python code examples.
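Note that library.file_io is a project-specific utility module rather than part of the standard library, and its source is not reproduced on this page. The examples below only rely on iterateLinesFromFile behaving like a lazy line iterator over a text file. As a rough, hypothetical stand-in (not the actual implementation; whether the real method strips whitespace or handles compressed files is not shown here):

class FileIO(object):
    @staticmethod
    def iterateLinesFromFile(file_path):
        # Assumed behavior: lazily yield each line of the file,
        # stripped of surrounding whitespace.
        with open(file_path) as f:
            for line in f:
                yield line.strip()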
Example 1: get_ltuo_hashtag_and_model_rank_accuracy_and_random_rank_accuracy
# Required import: from library.file_io import FileIO [as alias]
# Or: from library.file_io.FileIO import iterateLinesFromFile [as alias]
def get_ltuo_hashtag_and_model_rank_accuracy_and_random_rank_accuracy(file):
    ltuo_hashtag_and_model_rank_accuracy_and_random_rank_accuracy = []
    for data in FileIO.iterateLinesFromFile(file):
        # hashtag, model_rank_accuracy, random_rank_accuracy = data.split(',')[1:3]
        data = data.split(',')[2:5]
        ltuo_hashtag_and_model_rank_accuracy_and_random_rank_accuracy.append([float(i) for i in [data[2], data[0], data[1]]])
    return ltuo_hashtag_and_model_rank_accuracy_and_random_rank_accuracy
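The column layout of the input file is not documented on this page. Purely as an illustration, assuming a comma-separated line whose columns 2-4 (zero-based) hold the values of interest, the loop above reorders them as [column 4, column 2, column 3]:

# Hypothetical input line; the real column layout of `file` is an assumption here.
sample = 'run_id,hashtag_foo,0.72,0.41,0.9876'
fields = sample.split(',')[2:5]                              # ['0.72', '0.41', '0.9876']
row = [float(i) for i in [fields[2], fields[0], fields[1]]]
# row == [0.9876, 0.72, 0.41]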
Example 2: iterateFrequentLocationsFromFIMahout
# Required import: from library.file_io import FileIO [as alias]
# Or: from library.file_io.FileIO import iterateLinesFromFile [as alias]
def iterateFrequentLocationsFromFIMahout(
    minLocationsTheUserHasCheckedin,
    minUniqueUsersCheckedInTheLocation,
    minCalculatedSupport,
    minLocationsInItemset=0,
    extraMinSupport=minSupport,
    yieldSupport=False,
    lids=False,
):
    # for line in FileIO.iterateLinesFromFile(locationsFIMahoutOutputFile%(minUserLocations, minCalculatedSupport)):
    for line in FileIO.iterateLinesFromFile(
        locationsFIMahoutOutputFile
        % (minLocationsTheUserHasCheckedin, minUniqueUsersCheckedInTheLocation, minCalculatedSupport)
    ):
        if line.startswith("Key:"):
            data = line.split("Value: ")[1][1:-1].split(",")
            if not lids:
                locationItemset, support = (
                    [getLocationFromLid(i.replace("_", " ")) for i in data[0][1:-1].split()],
                    int(data[1]),
                )
            else:
                locationItemset, support = [i.replace("_", " ") for i in data[0][1:-1].split()], int(data[1])
            if support >= extraMinSupport and len(locationItemset) >= minLocationsInItemset:
                if not yieldSupport:
                    yield [location for location in locationItemset if isWithinBoundingBox(location, us_boundary)]
                else:
                    yield [
                        location
                        for location in locationItemset
                        if isWithinBoundingBox(getLocationFromLid(location), us_boundary)
                    ], support
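The exact format of the Mahout frequent-itemset dump read above is not shown on this page; the parsing only assumes lines shaped like the hypothetical one below, with location ids encoded as lat_lng tokens and the support after a comma:

# Hypothetical dump line; the real file contents are an assumption here.
line = 'Key: 0 Value: ([41.8781_-87.6298 40.7128_-74.0060],27)'
data = line.split('Value: ')[1][1:-1].split(',')    # ['[41.8781_-87.6298 40.7128_-74.0060]', '27']
lids = [i.replace('_', ' ') for i in data[0][1:-1].split()]
support = int(data[1])
# lids == ['41.8781 -87.6298', '40.7128 -74.0060'], support == 27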
Example 3: streamingLSHClusteringDemo
# Required import: from library.file_io import FileIO [as alias]
# Or: from library.file_io.FileIO import iterateLinesFromFile [as alias]
def streamingLSHClusteringDemo():
    clustering_settings = {'dimensions': 53,
                           'signature_length': 13,
                           'number_of_permutations': 5,
                           'threshold_for_document_to_be_in_cluster': 0.2}
    clustering = StreamingLSHClustering(**clustering_settings)
    docId = 0
    docsToOriginalClusterMap = {}
    for line in FileIO.iterateLinesFromFile('../data/streaming.dat'):
        document = createDocumentFromLine(docId, line)
        docsToOriginalClusterMap[docId] = document.clusterId
        docId += 1
        clustering.getClusterAndUpdateExistingClusters(document)
    clusterLabels = []
    for k, cluster in clustering.clusters.iteritems():
        clusterLabels.append([docsToOriginalClusterMap[doc.docId] for doc in cluster.iterateDocumentsInCluster()])
    return EvaluationMetrics.getValueForClusters(clusterLabels, EvaluationMetrics.purity)
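Judging from createDocumentFromLine in Example 4, each line of '../data/streaming.dat' is expected to start with a cluster label followed by the document's words; this format is inferred from the code, not stated on the page. A minimal driver under that assumption:

# Example line assumed by the demo (label first, then the words):
#   cluster_3 apple banana apple cherry
if __name__ == '__main__':
    purity = streamingLSHClusteringDemo()
    print(purity)  # purity of the recovered clusters, between 0 and 1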
Example 4: createDocumentFromLine
# Required import: from library.file_io import FileIO [as alias]
# Or: from library.file_io.FileIO import iterateLinesFromFile [as alias]
nns_settings = {'dimensions': 53,
                'signature_length': 13,
                'number_of_permutations': 5,
                'signature_type': 'signature_type_lists',
                'nearest_neighbor_threshold': 0.2}

def createDocumentFromLine(docId, line):
    vector, words = Vector(), line.split()
    for word in words[1:]:
        if word not in vector: vector[word] = 1
        else: vector[word] += 1
    return Document(words[0], vector)

i = 0
documents = []
for line in FileIO.iterateLinesFromFile('../data/streaming.dat'):
    documents.append(createDocumentFromLine(None, line)); i += 1
    if i == 10: break

class NearestNeighborUsingLSHTests(unittest.TestCase):
    def setUp(self):
        self.nnsLSH = NearestNeighborUsingLSH(**nns_settings)
    # def test_nns(self):
    #     for d in documents:
    #         self.nnsLSH.update(d)
    #         self.assertEqual(d.docId, self.nnsLSH.getNearestDocument(d))
    def test_getNearestDocumentWithReplacement(self):
        for d in documents: self.nnsLSH.update(d)
        for d in documents: print d.docId, self.nnsLSH.getNearestDocumentWithReplacement(d)