本文整理汇总了Python中FileSystem.FileSystem类的典型用法代码示例。如果您正苦于以下问题:Python FileSystem类的具体用法?Python FileSystem怎么用?Python FileSystem使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了FileSystem类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: run
def run(projectName):
    """Aggregate forum-post counts from <results>/<project>/results.csv into
    5-year age buckets (ages 15-74), overall and split by gender and by
    employment status, then write one summary CSV per group via
    summarizeBuckets().

    Respondents aged >= ageMax are pooled into a single "oldies" group that
    summarizeBuckets() receives separately.
    """
    path = os.path.join(FileSystem.getResultsDir(), projectName, 'results.csv')
    # with-statement guarantees the handle is closed even if readlines() fails
    with open(path) as fid:
        rows = fid.readlines()
    buckets = []
    bucketsByGender = {'male': [], 'female': []}
    bucketsByEmployment = {}
    for key in employmentOptions:
        bucketsByEmployment[employmentOptions[key]] = []
    ageMax = 75
    ageMin = 15
    sizeBucket = 5
    oldies = []
    oldiesByGender = {'male': [], 'female': []}
    oldiesByEmployment = {}
    for key in employmentOptions:
        oldiesByEmployment[employmentOptions[key]] = []
    # Floor division: the original "/" yields a float under Python 3, which
    # breaks both range() and list indexing below.
    numBuckets = (ageMax - ageMin) // sizeBucket
    for i in range(numBuckets):
        buckets.append([])
        bucketsByGender['male'].append([])
        bucketsByGender['female'].append([])
        for key in employmentOptions:
            bucketsByEmployment[employmentOptions[key]].append([])
    for r in rows:
        row = r.strip().split(', ')
        age = int(row[1])
        gender = row[2]
        employment = row[3]
        studentStatus = row[4]
        grade = int(row[5])
        numPosts = int(row[6])
        if age >= ageMin and age < ageMax:
            # renamed from "bin" (shadowed the builtin); floor division again
            bucketIdx = (age - ageMin) // sizeBucket
            buckets[bucketIdx].append(numPosts)
            if gender == 'male' or gender == 'female':
                bucketsByGender[gender][bucketIdx].append(numPosts)
            bucketsByEmployment[employment][bucketIdx].append(numPosts)
        if age >= ageMax:
            oldies.append(numPosts)
            if gender == 'male' or gender == 'female':
                oldiesByGender[gender].append(numPosts)
            oldiesByEmployment[employment].append(numPosts)
    path = os.path.join(FileSystem.getResultsDir(), projectName, 'aggregatedAgeVsForumPosts.csv')
    pathMale = os.path.join(FileSystem.getResultsDir(), projectName, 'maleAgeVsForumPosts.csv')
    pathFemale = os.path.join(FileSystem.getResultsDir(), projectName, 'femaleAgeVsForumPosts.csv')
    summarizeBuckets(buckets, oldies, path, ageMin, ageMax, sizeBucket)
    summarizeBuckets(bucketsByGender['male'], oldiesByGender['male'], pathMale, ageMin, ageMax, sizeBucket)
    summarizeBuckets(bucketsByGender['female'], oldiesByGender['female'], pathFemale, ageMin, ageMax, sizeBucket)
    for key in employmentOptions:
        if len(key) > 0:
            path = os.path.join(FileSystem.getResultsDir(), projectName, employmentOptions[key] + 'AgeVsForumPosts.csv')
            summarizeBuckets(bucketsByEmployment[employmentOptions[key]],
                             oldiesByEmployment[employmentOptions[key]], path, ageMin, ageMax, sizeBucket)
示例2: rename_file
def rename_file(self):
    """Rename the currently displayed file to the name typed into the
    line edit, keeping its directory, then restore the label view."""
    current = self.file_label.text()
    sep_idx = current.rfind("\\")
    renamed = os.path.join(current[0:sep_idx], self.file_line_edit.text())
    FileSystem.rename_file(self, current, renamed)
    self.set_file_name(renamed)
    # swap the edit widget back for the static label
    self.file_line_edit.hide()
    self.file_label.show()
示例3: main
def main(argv):
    """Split the input video into frames, run the configured image
    processor/filters over them, and re-join the result into a new AVI.

    argv[1] is the input video path.  Returns 0 on success, 1 on usage
    error.  Working directories are created up front and deleted at the
    end.
    """
    if len(argv) != 2:
        # parenthesized print: the original Py2 print statement is a
        # syntax error under Python 3; this form works on both.
        print('format: main.py <input>')
        return 1
    filesystem = FileSystem()
    filesystem.initDirs()
    Logger.write('start explode video')
    videoProcessor = DefaultVideoProcessor()
    videoProcessor.splitVideo(argv[1], config.DEFAULT_IMGS_DIR)
    filterManager = FiltersManager()
    # Alternative filter chains, kept for reference:
    # filterManager.addFilter(CropFilter(230, 263, 575, 530))
    # filterManager.addFilter(PseudoColorFilter())
    videoProcessor.work(DiffPrevImageProcessor(), filterManager.release())
    # videoProcessor.work(DiffMovingAverageDarkImageProcessor().init(30, 30, 'dark.png'), filterManager.release())
    Logger.write('start implode video')
    # output name encodes the input name plus the active filter chain
    targetFilename = '%s_%s.avi' % (argv[1], filterManager.getNames())
    videoProcessor.joinVideo(config.DEFAULT_IMGS_OUT_DIR, targetFilename)
    Logger.write('end of work. created video in %s' % targetFilename)
    filesystem.deleteDirs()
    return 0
示例4: run
def run(projectName):
    """For every course with a contribution file, compare thread lengths
    for threads with vs. without posts by top-5% contributors, printing
    per-course medians/means and the averaged differences."""
    courseDatasets = FileSystem.loadCourseDatasetInfo()
    resultsDir = os.path.join(FileSystem.getResultsDir(), projectName)
    medianDiffs = []
    meanDiffs = []
    for course in courseDatasets:
        contributionPath = os.path.join(resultsDir, course.name + '_contribution.csv')
        try:
            with open(contributionPath) as fid:
                forumUserIds = [line.strip() for line in fid.readlines()]
        except IOError:
            # no contribution file for this course; skip it
            continue
        topUserIds = getTopFivePercent(forumUserIds)
        DBSetup.switch(course)
        threads = ForumThreads.objects.all()
        posts = ForumPosts.objects.all()
        TC, nonTC = isolateThreadLengths(threads, posts, topUserIds)
        TCMedian, nonTCMedian = median(TC), median(nonTC)
        TCMean, nonTCMean = mean(TC), mean(nonTC)
        medianDiffs.append(TCMedian - nonTCMedian)
        meanDiffs.append(TCMean - nonTCMean)
        print(course.name)
        print('Median thread length for threads with posts by top contributors: ' + str(TCMedian))
        print('Median thread length for threads without posts by top contributors: ' + str(nonTCMedian))
        print('Mean thread length for threads with posts by top contributors: ' + str(TCMean))
        print('Mean thread length for threads without posts by top contributors: ' + str(nonTCMean))
        print(' ')
    print('Average difference between median thread lengths: ' + str(mean(medianDiffs)))
    print('Average difference between mean thread lengths: ' + str(mean(meanDiffs)))
示例5: runner
def runner(self):
    """Build a forum view log for the current course from its gzipped
    Coursera activity log.

    Writes extracted views to <DataDir>/ActivityLogsCoursera/<course>.viewlog
    and re-zips the activity log afterwards.  Calls sys.exit() to stop
    early when the course has no activity log, the output already exists,
    or the unzipped log is missing.  Known course-DB errors are logged and
    swallowed so a batch run can continue.
    """
    logging.info('MakeForumViewLogs.runner(), ' + self.currCourseName)
    self.courseDatasetInfo = FileSystem.loadCourseDatasetInfo()
    try:
        # Skip courses with no recorded activity log entry.
        if self.currCourseName not in self.courseDatasetInfo \
        or self.courseDatasetInfo[self.currCourseName] is None:
            print(self.currCourseName + ' has no activity log. Exiting...')
            sys.exit()
        print('Working on: ' + self.currCourseName + ' (' + self.progress() +')')
        activityLogFileZipped = self.getActivityLogFile()
        # drop the trailing ".gz" (assumes a 3-char suffix — TODO confirm)
        activityLogFileUnzipped = activityLogFileZipped[:-3]
        outputDir = os.path.join(FileSystem.getDataDir(), 'ActivityLogsCoursera')
        outputPath = os.path.join(outputDir, self.currCourseName + '.viewlog')
        if os.path.exists(outputPath):
            # never overwrite a previously generated view log
            logging.info('Output file already exists: ' + outputPath)
            sys.exit()
        if os.path.exists(activityLogFileZipped):
            self.unzip(activityLogFileZipped)
        if not os.path.exists(activityLogFileUnzipped):
            logging.info('Error finding file ' + activityLogFileUnzipped)
            sys.exit()
        views = self.getViews(activityLogFileUnzipped)
        self.writeViews(views,outputPath)
        # re-compress so the data directory stays zipped
        self.zip(activityLogFileUnzipped)
    except CourseDBError:
        logging.info('\t\t+ ERROR (Connection does not exist), skipping...')
        pass
    except NoGradesError:
        logging.info('\t\t+ ERROR (CourseGrades does not exist), skipping...')
        pass
示例6: run
def run(projectName):
    """Draw a stacked bar chart of the five category fractions read from
    every per-class CSV in the project results directory and save it as
    distribution.pdf in the same directory."""
    categories = [[] for _ in range(5)]
    classNames = []
    resultsPath = os.path.join(FileSystem.getResultsDir(), projectName)
    for fname in os.listdir(resultsPath):
        if fname[-4:] == '.csv':
            classNames.append(fname[:-4])
            with open(os.path.join(resultsPath, fname)) as fid:
                lines = fid.readlines()
            # second comma-separated column of each row is the fraction
            values = [float(line.strip().split(', ')[1]) for line in lines]
            for idx, value in enumerate(values):
                categories[idx].append(value)
    numClasses = len(classNames)
    ind = range(numClasses)
    width = .5
    plt.figure(1)
    # each bar segment is stacked on the cumulative sum of those below it
    p0 = plt.bar(ind, categories[0], width, color='k')
    p1 = plt.bar(ind, categories[1], width, color='r', bottom=categories[0])
    p2 = plt.bar(ind, categories[2], width, color='g', bottom=listsum(categories, [0, 1]))
    p3 = plt.bar(ind, categories[3], width, color='b', bottom=listsum(categories, [0, 1, 2]))
    p4 = plt.bar(ind, categories[4], width, color='c', bottom=listsum(categories, [0, 1, 2, 3]))
    plt.xticks([x + width/2. for x in ind], classNames, rotation='vertical')
    plt.ylim((0., 1.))
    plt.subplots_adjust(bottom=0.5)
    figpath = os.path.join(FileSystem.getResultsDir(), projectName, 'distribution.pdf')
    plt.savefig(figpath)
示例7: run
def run(projectName):
    """Merge the per-course bound data (results.csv) with the course
    dataset catalogue and write fullCourseList.csv."""
    resultsDir = os.path.join(FileSystem.getResultsDir(), projectName)
    courseData = FileSystem.loadCourseDatasetInfo()
    boundData = loadBoundData(os.path.join(resultsDir, 'results.csv'))
    writeData(os.path.join(resultsDir, 'fullCourseList.csv'), courseData, boundData)
示例8: run
def run():
    """Compute the masked (NaN-aware) correlation matrix over the columns
    of the ForumMetrics results and write it to corrMat.csv."""
    projectName = 'ForumMetrics'
    resultsDir = os.path.join(FileSystem.getResultsDir(), projectName)
    arr, nanmask = loadResults(os.path.join(resultsDir, 'results.csv'))
    # mask NaN entries so corrcoef ignores them
    maskedData = ma.array(arr, mask=nanmask)
    corrMatrix = np.ma.corrcoef(np.transpose(maskedData))
    writeCorrMat(corrMatrix, os.path.join(resultsDir, 'corrMat.csv'))
示例9: run
def run(projectName):
    """Tally, per course, where in each thread top contributors vs. all
    other users post; write per-course and aggregated histograms to
    topContributorPositions.txt, plus two normalized continuous position
    histograms (TC / non-TC) as CSV files.

    Courses without a _contribution.csv file are skipped.
    """
    courseDatasets = FileSystem.loadCourseDatasetInfo()
    resultsDir = os.path.join(FileSystem.getResultsDir(), projectName)
    outputPath = os.path.join(resultsDir, 'topContributorPositions.txt')
    cumulativeResultsTC = {}
    cumulativeResultsNonTC = {}
    cumulativeContHistTC = NUMBINS*[0]
    cumulativeContHistNonTC = NUMBINS*[0]
    # with-statement: the original open()/close() pair leaked the handle
    # if anything between them raised
    with open(outputPath, 'wt') as ofid:
        for course in courseDatasets:
            print(course.name)
            path = os.path.join(resultsDir, course.name + '_contribution.csv')
            try:
                with open(path) as fid:
                    forumUserIds = [r.strip() for r in fid.readlines()]
            except IOError:
                # no contribution file for this course
                continue
            topUserIds = getTopFivePercent(forumUserIds)
            DBSetup.switch(course)
            forumData = CourseForums()
            resultsTC, resultsNonTC, continuousHistTC, continuousHistNonTC = tallyPositions(forumData, topUserIds)
            # accumulate per-course tallies into the cross-course totals
            cumulativeResultsTC = addResultsDict(cumulativeResultsTC, resultsTC)
            cumulativeResultsNonTC = addResultsDict(cumulativeResultsNonTC, resultsNonTC)
            cumulativeContHistTC = addResultsList(cumulativeContHistTC, continuousHistTC)
            cumulativeContHistNonTC = addResultsList(cumulativeContHistNonTC, continuousHistNonTC)
            ofid.write('--------------------------------------------\n')
            ofid.write('Course: ' + course.name + '\n')
            ofid.write('Top contributor post position histogram\n')
            summarization(ofid, resultsTC, 10)
            ofid.write('\n\n')
            ofid.write('Non top contributor post position histogram\n')
            summarization(ofid, resultsNonTC, 10)
        ofid.write('**************************************\n')
        ofid.write('Aggregated over courses:\n')
        ofid.write('Top contributor post position histogram\n')
        summarization(ofid, cumulativeResultsTC, 20)
        ofid.write('\n\n')
        ofid.write('Non top contributor post position histogram\n')
        summarization(ofid, cumulativeResultsNonTC, 20)
    normalizedCumulativeContHistTC = normalize(cumulativeContHistTC)
    normalizedCumulativeContHistNonTC = normalize(cumulativeContHistNonTC)
    outputPathTC = os.path.join(resultsDir, 'normalizedPositionHistTC.csv')
    with open(outputPathTC, 'wt') as ofid:
        for i in range(NUMBINS):
            ofid.write(str(i) + ', ' + str(normalizedCumulativeContHistTC[i]) + '\n')
    outputPathNonTC = os.path.join(resultsDir, 'normalizedPositionHistNonTC.csv')
    with open(outputPathNonTC, 'wt') as ofid:
        for i in range(NUMBINS):
            ofid.write(str(i) + ', ' + str(normalizedCumulativeContHistNonTC[i]) + '\n')
示例10: save
def save(self, path):
    """Serialize the document DOM to *path* as UTF-8 XML.

    Returns whatever FileSystem.filePutContents returns (presumably a
    success flag — TODO confirm against FileSystem).
    """
    root = self.documentXmlDom.documentElement
    # empty indent/newl: pretty-print without injecting whitespace nodes
    xml = root.toprettyxml(encoding='UTF-8', indent="", newl="")
    # removed an unreachable "pass" that followed the original return
    return FileSystem.filePutContents(path, xml)
示例11: __init__
def __init__(self):
    """Load every forum table of the currently-selected course database
    into memory and build the forum/thread/post/comment lookup maps."""
    self.forumTypes = list(FileSystem.loadForumTypes())
    self.forums = list(ForumForums.objects.all())
    self.threads = list(ForumThreads.objects.all())
    self.posts = list(ForumPosts.objects.all())
    self.comments = list(ForumComments.objects.all())
    self.reputations = list(ForumReputationPoints.objects.all())
    try:
        # View logs are optional: some course dumps lack this table.
        self.views = list(ForumViewLog.objects.all())
    except Exception:
        # Narrowed from a bare "except:" so SystemExit/KeyboardInterrupt
        # propagate; self.views stays unset on failure, as before.
        pass
    self._buildForumIndex()
    self._getForumParentMap()
    self._getForumTypeMap()
    self._getThreadToForumMap()
    self._getThreadToPostMap()
    self._getPostToThreadMap()
    self._getPostToForumMap()
    self._getCommentToPostMap()
    self._getCommentToThreadMap()
    self._getCommentToForumMap()
    try:
        self._getViewToThreadMap()
    except Exception:
        # Also narrowed from a bare "except:"; the view map is best-effort.
        pass
示例12: __init__
def __init__(self):
    """Point this instance at the PerUserPosting results file and load
    the posters index from it."""
    self.projectName = "PerUserPosting"
    resultsRoot = FileSystem.getResultsDir()
    self.resultsDir = os.path.join(resultsRoot, self.projectName)
    self.path = os.path.join(self.resultsDir, "results.csv")
    self.posters = {}  # filled in by _loadPosters()
    self._loadPosters()
示例13: __init__
def __init__(self, fileName="", defaultFileExtension=""):
    """Create the helper objects (file system, mirrored directory,
    importer) and point this instance at *fileName*."""
    self._settings = None
    self._templateDir = None
    self.fileSystem = FileSystem()
    self._fileComponents = MirroredDirectory()
    self.importer = Importer()
    # delegate filename/extension handling to set()
    self.set(fileName, defaultFileExtension)
示例14: mergeCorrelationResults
def mergeCorrelationResults(projectName):
    """Collect per-course regression results and course stats (skipping
    courses whose files are missing) and write the merged CSV."""
    courseList = FileSystem.loadCourseList()
    resultsDir = os.path.join(FileSystem.getResultsDir(), projectName)
    merged = []
    for course in courseList:
        courseDir = os.path.join(resultsDir, course)
        regressionPath = os.path.join(courseDir, 'ForumActivityVsQuizScore_regression.csv')
        statsPath = os.path.join(courseDir, 'CourseStats.csv')
        try:
            regression = loadRegressionResults(regressionPath)
            stats = loadCourseStats(statsPath)
            merged.append((course, regression, stats))
        except IOError:
            # either file missing for this course; leave it out
            continue
    outputPath = os.path.join(resultsDir, 'mergedCorrelationResults.csv')
    writeMergedCorrelationResults(merged, outputPath)
示例15: open
def open(self):
    """Read the Rigid Disk Block and walk its partition and filesystem
    block chains from the raw device.

    Populates self.rdb, self.parts, self.fs, self.used_blks and
    self.max_blks.  Returns True on success; on any read failure sets
    self.valid = False and returns False (leaving state partially
    populated).
    """
    # read RDB
    self.rdb = RDBlock(self.rawblk)
    if not self.rdb.read():
        self.valid = False
        return False
    # create used block list
    self.used_blks = [self.rdb.blk_num]
    # read partitions: walk the singly-linked partition block chain,
    # terminated by the Block.no_blk sentinel
    part_blk = self.rdb.part_list
    self.parts = []
    num = 0
    while part_blk != Block.no_blk:
        p = Partition(self.rawblk, part_blk, num, self.rdb.log_drv.cyl_blks, self)
        num += 1
        if not p.read():
            self.valid = False
            return False
        self.parts.append(p)
        # store used block
        self.used_blks.append(p.get_blk_num())
        # next partition
        part_blk = p.get_next_partition_blk()
    # read filesystems: same chain-walk pattern as partitions
    fs_blk = self.rdb.fs_list
    self.fs = []
    num = 0
    while fs_blk != PartitionBlock.no_blk:
        fs = FileSystem(self.rawblk, fs_blk, num)
        num += 1
        if not fs.read():
            self.valid = False
            return False
        self.fs.append(fs)
        # store used blocks
        self.used_blks += fs.get_blk_nums()
        # next partition
        fs_blk = fs.get_next_fs_blk()
    # TODO: add bad block blocks
    self.valid = True
    # highest RDB-reserved block index plus one
    self.max_blks = self.rdb.log_drv.rdb_blk_hi + 1
    return True