当前位置: 首页>>代码示例>>Python>>正文


Python Utilities.iterateJsonFromFile方法代码示例

本文整理汇总了Python中utilities.Utilities.iterateJsonFromFile方法的典型用法代码示例。如果您正苦于以下问题:Python Utilities.iterateJsonFromFile方法的具体用法?Python Utilities.iterateJsonFromFile怎么用?Python Utilities.iterateJsonFromFile使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在utilities.Utilities的用法示例。


在下文中一共展示了Utilities.iterateJsonFromFile方法的11个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: analyzeStatsForDatasets125

# 需要导入模块: from utilities import Utilities [as 别名]
# 或者: from utilities.Utilities import iterateJsonFromFile [as 别名]
 def analyzeStatsForDatasets125():
     '''
     train
     politics 31976 1279
     entertainment 21064 842
     technology 36663 1466
     sports 33005 1320
     test
     politics 26373 1054
     entertainment 25714 1028
     technology 29734 1189
     sports 41219 1648
     25
     '''
     trainData, testData, count = defaultdict(int), defaultdict(int), 0
     for l in Utilities.iterateJsonFromFile(Settings.stats_for_dataset_125):
         for k in l['train_classes']: 
             trainData[k]+=l['train_classes'][k]
             testData[k]+=l['test_classes'][k]
         count+=1
     print 'train'
     for k in trainData:
         print k, trainData[k], trainData[k]/count
     print 'test'
     for k in testData:
         print k, testData[k], testData[k]/count
     print count
开发者ID:kykamath,项目名称:twitter_classifier,代码行数:29,代码来源:experiments.py

示例2: analyzeStatsToObservePerformanceByRelabelingDocuments

# 需要导入模块: from utilities import Utilities [as 别名]
# 或者: from utilities.Utilities import iterateJsonFromFile [as 别名]
 def analyzeStatsToObservePerformanceByRelabelingDocuments():
     '''
     0.67 0.00
     '''
     perfromanceByRelabeling=[]
     for data in Utilities.iterateJsonFromFile(Settings.stats_to_observe_performance_by_relabeling_documents): perfromanceByRelabeling.append(data['value'])
     print '%0.2f'%numpy.mean(perfromanceByRelabeling), '%0.2f'%numpy.var(perfromanceByRelabeling)
开发者ID:kykamath,项目名称:twitter_classifier,代码行数:9,代码来源:experiments.py

示例3: analyzeTrainingData

# 需要导入模块: from utilities import Utilities [as 别名]
# 或者: from utilities.Utilities import iterateJsonFromFile [as 别名]
 def analyzeTrainingData():
     '''
     Plot, as a heat map, the relative change in daily training-set
     size per class against the first observed day.
     '''
     # Row order of the heat map (top to bottom).
     yticks = ('sports', 'technology', 'entertainment', 'politics')
     # day (datetime) -> {classType: count} parsed from the stats file.
     dataByDay = {}
     for l in Utilities.iterateJsonFromFile(Settings.stats_for_training_data):
         dataByDay[datetime.strptime(l['day'], Settings.twitter_api_time_format)] = l['class_distribution']
     dataToPlot = defaultdict(list)
     previousDaysData = None
     for d in sorted(dataByDay):
         # NOTE(review): previousDaysData is set only once, on the first
         # day, and never updated afterwards — every later day is
         # compared against day 1, not the preceding day, despite the
         # variable name. Confirm this is intended.
         if previousDaysData==None: previousDaysData=dataByDay[d]
         else:
             currentDaysData = dataByDay[d]
             for classType in currentDaysData:
                 # sqrt((a-b)**2) is |a-b|; the value appended is the
                 # absolute change as a fraction of the baseline count.
                 dataToPlot[classType].append(numpy.sqrt((currentDaysData[classType]-previousDaysData[classType])**2)/previousDaysData[classType])
     fig=plt.figure()
     cmap=mpl.cm.Blues
     for k in dataToPlot: print k, dataToPlot[k]
     # One heat-map row per class, in yticks order; values clipped to [0, 2].
     plt.imshow([dataToPlot[k] for k in yticks], cmap = cmap, interpolation='nearest', aspect=5, alpha=1, vmin=0, vmax=2)
     plt.xticks(())
     plt.yticks(range(len(yticks)), [k for k in yticks])
     plt.xlabel('March-April 2011')
     plt.title('Ratio of change in training-set size.')
     
     # Manually-placed colorbar axis on the right edge of the figure.
     ax1 = fig.add_axes([0.85, 0.1, 0.05, 0.8])
     norm = mpl.colors.Normalize(vmin=0, vmax=2)
     cb1 = mpl.colorbar.ColorbarBase(ax1, cmap=cmap,
                                    norm=norm,
                                    orientation='vertical',alpha=1)
     
     plt.show()
开发者ID:kykamath,项目名称:twitter_classifier,代码行数:31,代码来源:experiments.py

示例4: analyzeStatsToCompareCollocations

# 需要导入模块: from utilities import Utilities [as 别名]
# 或者: from utilities.Utilities import iterateJsonFromFile [as 别名]
 def analyzeStatsToCompareCollocations():
     '''
     125 chi_sqare 0.70 0.00
     125 likelihood_ratio 0.69 0.00
     375 chi_sqare 0.74 0.00
     375 likelihood_ratio 0.69 0.00
     '''
     languageModelToScore=defaultdict(list)
     for data in Utilities.iterateJsonFromFile(Settings.stats_to_compare_collocations): languageModelToScore['%s %s'%(data['number_of_experts'], data['collocation_measure'])].append(data['value'])
     for languageModel in languageModelToScore: print languageModel, '%0.2f'%numpy.mean(languageModelToScore[languageModel]), '%0.2f'%numpy.var(languageModelToScore[languageModel])
开发者ID:kykamath,项目名称:twitter_classifier,代码行数:12,代码来源:experiments.py

示例5: analyzeStatsToDetermineFixedWindowLength

# 需要导入模块: from utilities import Utilities [as 别名]
# 或者: from utilities.Utilities import iterateJsonFromFile [as 别名]
 def analyzeStatsToDetermineFixedWindowLength():
     '''
     Plot mean AUCM score against classifier training-window length
     to pick a fixed window size.
     '''
     scoresByLength = defaultdict(list)
     for item in Utilities.iterateJsonFromFile(Settings.stats_to_determine_fixed_window_length):
         scoresByLength[item['classifier_length']].append(item['value'])
     xValues, yValues = [], []
     for windowLength in scoresByLength:
         xValues.append(windowLength)
         yValues.append(numpy.mean(scoresByLength[windowLength]))
     plt.plot(xValues, yValues, 'om-', lw=2, label='Unigram model')
     plt.legend()
     plt.title('AUCM at different model window training lengths')
     plt.ylabel('AUCM value')
     plt.xlabel('Length of training window (days)')
     plt.ylim((0.2, 1))
     plt.show()
开发者ID:kykamath,项目名称:twitter_classifier,代码行数:14,代码来源:experiments.py

示例6: analyzeStatsToCompareDifferentDocumentTypes

# 需要导入模块: from utilities import Utilities [as 别名]
# 或者: from utilities.Utilities import iterateJsonFromFile [as 别名]
 def analyzeStatsToCompareDifferentDocumentTypes():
     '''
     char_bigram 0.67 0.00
     ruusl_unigram_with_meta 0.71 0.00
     ruusl_bigram 0.49 0.00
     ruusl_unigram_nouns_with_meta 0.66 0.00
     ruusl_sparse_bigram 0.54 0.00
     removed_url_users_specialcharaters_and_lemmatized 0.71 0.00
     ruusl_unigram_nouns 0.66 0.00
     '''
     languageModelToScore=defaultdict(list)
     for data in Utilities.iterateJsonFromFile(Settings.stats_to_compare_different_document_types): languageModelToScore[data['data_type']].append(data['value'])
     for languageModel in languageModelToScore: print languageModel, '%0.2f'%numpy.mean(languageModelToScore[languageModel]), '%0.2f'%numpy.var(languageModelToScore[languageModel])
开发者ID:kykamath,项目名称:twitter_classifier,代码行数:15,代码来源:experiments.py

示例7: analyzeStatsForDimnishingAUCMValues

# 需要导入模块: from utilities import Utilities [as 别名]
# 或者: from utilities.Utilities import iterateJsonFromFile [as 别名]
 def analyzeStatsForDimnishingAUCMValues():
     '''
     Plot the decay of mean AUCM scores over future days for three
     classifier window lengths (1, 8 and 14 days).
     '''
     # classifier_length -> {no_of_days_in_future: [scores]}
     daysToScore = defaultdict(dict)
     # Line style per classifier window length.
     color = {1: 'rx-', 8: 'g>-', 14: 'bo-'}
     for data in Utilities.iterateJsonFromFile(Settings.stats_for_diminishing_aucm): 
         if data['no_of_days_in_future'] not in daysToScore[data['classifier_length']]: daysToScore[data['classifier_length']][data['no_of_days_in_future']]=[]
         daysToScore[data['classifier_length']][data['no_of_days_in_future']].append(data['value'])
     for classifierLength in sorted(daysToScore):
         print classifierLength
         # NOTE(review): this slices Python 2's arbitrary dict key
         # order to pick 5 "days in future" values; it presumably works
         # because insertion happens in file order, but it is not
         # guaranteed — confirm, or sort the keys explicitly.
         dataX = daysToScore[classifierLength].keys()[4:9]
         dataY = [numpy.mean(daysToScore[classifierLength][x]) for x in dataX]
         plt.plot(dataX, dataY, color[classifierLength], label=str(classifierLength), lw=2)
     plt.legend()
     plt.ylabel('AUCM value')
     plt.xlabel('Number of days in future')
     plt.title('Decay in AUCM with time')
     # Relabel x positions 5..9 as days 1..5.
     plt.xticks( range(5,10), range(1,6) )
     plt.ylim( (0.627, 0.735) ) 
     plt.show()
开发者ID:kykamath,项目名称:twitter_classifier,代码行数:20,代码来源:experiments.py

示例8: analyzeStatsForDatasets

# 需要导入模块: from utilities import Utilities [as 别名]
# 或者: from utilities.Utilities import iterateJsonFromFile [as 别名]
 def analyzeStatsForDatasets():
     '''
     Print the grand total tweet count, then per class the total tweet
     count and its per-day average, then the number of days observed.
     Observed output:
     1253451
     politics 325699 13027.96
     entertainment 222124 8884.96
     technology 372908 14916.32
     sports 332720 13308.8
     25
     '''
     # perClassCount: classType -> {'total': tweets, 'no_of_days': days seen}
     total, perClassCount = 0, {}
     for l in Utilities.iterateJsonFromFile(Settings.stats_for_dataset):
         total+=l['total_tweets']
         for classType in l['classes']:
             if classType not in perClassCount: perClassCount[classType]={'total':0, 'no_of_days':0}
             perClassCount[classType]['total']+=l['classes'][classType]
             perClassCount[classType]['no_of_days']+=1
     print total
     for k, v in perClassCount.iteritems(): print k, perClassCount[k]['total'], perClassCount[k]['total']/float(perClassCount[k]['no_of_days'])
     # NOTE(review): `k` here is the loop variable leaked from the loop
     # above, so this prints no_of_days for whichever class iterated
     # last — presumably all classes share the same day count (25), but
     # confirm; raises NameError if perClassCount is empty.
     print perClassCount[k]['no_of_days']
开发者ID:kykamath,项目名称:twitter_classifier,代码行数:21,代码来源:experiments.py

示例9: analyzeStatsForGlobalClassifier

# 需要导入模块: from utilities import Utilities [as 别名]
# 或者: from utilities.Utilities import iterateJsonFromFile [as 别名]
    def analyzeStatsForGlobalClassifier():
        '''
        Plot the daily AUCM value of the global classifier over the
        configured start/end time range.
        '''
        # day (datetime) -> AUCM value parsed from the stats file.
        dataToPlot=dict()
        for l in Utilities.iterateJsonFromFile(Settings.stats_for_global_classifier): dataToPlot[datetime.strptime(l['day'], Settings.twitter_api_time_format)]=l['value']
        
        date1 = Settings.startTime
        date2 = Settings.endTime
        # One x-axis point per day in [startTime, endTime).
        dates = drange(date1, date2, timedelta(days=1))
        
        print len(dates), len(dataToPlot)
        fig=plt.figure()
#        plt.plot_date(dates, [1 for k in dates], '-')
        # NOTE(review): the last day is dropped ([:-1]) to make the y
        # series the same length as `dates` — the print above exists to
        # eyeball that the two lengths line up; confirm the off-by-one
        # is intentional. The legend mean still includes the dropped day.
        plt.plot_date(dates, [dataToPlot[k] for k in sorted(dataToPlot)[:-1]], 'g-', lw=2, label='Global classifier (mean:%0.2f)'%numpy.mean(dataToPlot.values()))
#        plt.plot_date(dates, [0 for k in dates], '-')
        plt.ylim((0.4,0.55))
        plt.ylabel('AUCM value')
        plt.xlabel('Day')
        plt.title('AUCM values for global classifier.')
        plt.legend()
        # Slant the date labels so they do not overlap.
        fig.autofmt_xdate()
        plt.show()
开发者ID:kykamath,项目名称:twitter_classifier,代码行数:22,代码来源:experiments.py

示例10: trainAndSave

# 需要导入模块: from utilities import Utilities [as 别名]
# 或者: from utilities.Utilities import iterateJsonFromFile [as 别名]
 def trainAndSave(self):
     '''
     Train this classifier on the global classifier data set and
     persist the trained model to self.trainedClassifierFile.
     '''
     Utilities.createDirectory(self.trainedClassifierFile)
     # Lazily stream (document, label) pairs straight from the JSON file.
     labeledDocuments = ((item['data'], item['class']) for item in Utilities.iterateJsonFromFile(Settings.globalClassifierData))
     self.trainClassifier(labeledDocuments)
     Classifier.saveClassifier(self.classifier, self.trainedClassifierFile)
开发者ID:kykamath,项目名称:twitter_classifier,代码行数:6,代码来源:classifiers.py

示例11: _getFeatureDataByDay

# 需要导入模块: from utilities import Utilities [as 别名]
# 或者: from utilities.Utilities import iterateJsonFromFile [as 别名]
 def _getFeatureDataByDay():
     '''
     Build day -> {score: [feature names]} from the most-informative-
     features stats file. Each record's features are (name, score)
     pairs; they are grouped by score after sorting on it, as
     itertools.groupby requires.
     '''
     dataByDay = defaultdict(dict)
     for record in Utilities.iterateJsonFromFile(Settings.stats_for_most_informative_features):
         day = datetime.strptime(record['day'], Settings.twitter_api_time_format)
         rankedFeatures = sorted(record['features'], key=itemgetter(1))
         for score, featureGroup in groupby(rankedFeatures, key=itemgetter(1)):
             dataByDay[day][score] = [feature[0] for feature in featureGroup]
     return dataByDay
开发者ID:kykamath,项目名称:twitter_classifier,代码行数:8,代码来源:experiments.py


注:本文中的utilities.Utilities.iterateJsonFromFile方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。