本文整理汇总了Python中Utils.DbUtils.DbUtils.executeAllResults方法的典型用法代码示例。如果您正苦于以下问题:Python DbUtils.executeAllResults方法的具体用法?Python DbUtils.executeAllResults怎么用?Python DbUtils.executeAllResults使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类Utils.DbUtils.DbUtils
的用法示例。
在下文中一共展示了DbUtils.executeAllResults方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: __init__
# 需要导入模块: from Utils.DbUtils import DbUtils [as 别名]
# 或者: from Utils.DbUtils.DbUtils import executeAllResults [as 别名]
class Conversion:
    """Data-access layer for the `Conversion` table.

    Tracks raw scans awaiting conversion (e.g. to MINC) and their
    conversion status.
    """

    def __init__(self):
        self.tableName = 'Conversion'
        self.DBClient = DbUtils()
        self.sqlBuilder = SQLBuilder()

    def getObjectFromTuple(self, tuple):
        """Map a raw `Conversion` row tuple onto a ConversionObject.

        Column order is fixed by the table schema: record id, study, rid,
        scan type, scan date, scan time, S/I identifiers, file type,
        raw/converted folders, version, converted flag.
        """
        valuesDict = dict(record_id=tuple[0], study=tuple[1], rid=tuple[2], scan_type=tuple[3],
                          scan_date=tuple[4].strftime("%Y-%m-%d"), scan_time=str(tuple[5]),
                          s_identifier=tuple[6], i_identifier=tuple[7], file_type=tuple[8], raw_folder=tuple[9],
                          converted_folder=tuple[10], version=tuple[11], converted=tuple[12])
        return ConversionObject(valuesDict)

    def insertToTable(self, objList):
        """Insert each ConversionObject via an INSERT built by the SQL builder."""
        for obj in objList:
            self.DBClient.executeNoResult(
                self.sqlBuilder.getSQL_AddNewEntryToConversionTable(obj.sqlInsert()))

    def get_version(self, sortingObj, versionDict):
        """Decide the pipeline version for a sorted scan.

        ADNI scans downloaded from a 'Uniform' path are always 'V2';
        otherwise the version comes from `versionDict`, keyed by the scan's
        pipeline modality, defaulting to 'V1'.

        (Refactor: the original duplicated the identical versionDict-lookup
        expression in both branches; it is written once here.)
        """
        if sortingObj.study == 'ADNI' and 'Uniform' in sortingObj.download_folder:
            return 'V2'
        modality = sc.ProcessingModalityAndPipelineTypePerStudy[sortingObj.study][sortingObj.scan_type]
        return versionDict.get(modality, 'V1')

    def insertFromSortingObj(self, sortingObj, versionDict):
        """Create a Conversion row (converted = 0) from a sorted scan record."""
        sortingValues = sortingObj.getValuesDict()
        version = self.get_version(sortingObj, versionDict)
        # <root>/<study>/<scan_type>/<rid>/<date>_<sid>_<iid>/<version>/converted/final
        sortingValues['converted_folder'] = '{0}/{1}/{2}/{3}/{4}_{5}_{6}/{7}/converted/final'.format(
            sc.studyDatabaseRootDict[sortingObj.study],
            sortingObj.study, sortingObj.scan_type, sortingObj.rid,
            sortingObj.scan_date, sortingObj.s_identifier, sortingObj.i_identifier, version)
        sortingValues['version'] = version
        sortingValues['converted'] = 0
        self.insertToTable([ConversionObject(sortingValues)])

    def gettoBeConvertedPerStudy(self, study):
        """Return ConversionObjects for files of `study` awaiting conversion."""
        toConvertList = self.DBClient.executeAllResults(
            self.sqlBuilder.getSQL_getToBeConvertedFileFromConversionTable(study))
        return [self.getObjectFromTuple(t) for t in toConvertList]

    def setConvertedTrue(self, convertionObj):
        """Mark a record as successfully converted and persist it."""
        convertionObj.converted = 1
        self.saveObj(convertionObj)

    def setConvertedFailed(self, convertionObj):
        """Mark a record as failed (skip = 1) and persist it."""
        convertionObj.skip = 1
        self.saveObj(convertionObj)

    def saveObj(self, convertionObj):
        """Persist the object via an UPDATE built by the SQL builder."""
        self.DBClient.executeNoResult(self.sqlBuilder.getSQL_saveObjConversionTable(convertionObj))

    def getConvertedListPerStudy(self, study):
        """Return ConversionObjects for already-converted files of `study`."""
        convertedList = self.DBClient.executeAllResults(self.sqlBuilder.getSQL_getAllConvertedFromConvertionTable(study))
        return [self.getObjectFromTuple(t) for t in convertedList]
示例2: __init__
# 需要导入模块: from Utils.DbUtils import DbUtils [as 别名]
# 或者: from Utils.DbUtils.DbUtils import executeAllResults [as 别名]
class Processing:
    """Data-access wrapper for the `Processing` table."""

    def __init__(self):
        self.DBClient = DbUtils()
        self.sqlBuilder = SQLBuilder()

    def getObjectFromTuple(self, tuple):
        """Build a ProcessingObject from a raw `Processing` row.

        NOTE(review): column index 11 is skipped here (`processed` comes
        from index 12) — confirm against the table schema.
        """
        fields = {
            'record_id': tuple[0],
            'study': tuple[1],
            'rid': tuple[2],
            'modality': tuple[3],
            'scan_date': tuple[4].strftime("%Y-%m-%d"),
            'scan_time': str(tuple[5]),
            's_identifier': tuple[6],
            'i_identifier': tuple[7],
            'root_folder': tuple[8],
            'converted_folder': tuple[9],
            'version': tuple[10],
            'processed': tuple[12],
        }
        return ProcessingObject(fields)

    def insertToTable(self, objList):
        """Insert each object via an INSERT built by the SQL builder."""
        for entry in objList:
            insertSql = self.sqlBuilder.getSQL_AddNewEntryToProcessingTable(entry.sqlInsert())
            self.DBClient.executeNoResult(insertSql)

    def insertFromConvertionObj(self, convertionObj):
        """Create a Processing row from a finished conversion record."""
        values = convertionObj.getValuesDict()
        values['modality'] = sc.ProcessingModalityAndPipelineTypePerStudy[convertionObj.study][convertionObj.scan_type]
        # Drop the trailing two path components to get the scan's root folder.
        values['root_folder'] = '/'.join(convertionObj.converted_folder.split('/')[0:-2])
        self.insertToTable([ProcessingObject(values)])

    def getToProcessListPerStudy(self, study):
        """Return ProcessingObjects for all unprocessed rows of a study."""
        rows = self.DBClient.executeAllResults(
            self.sqlBuilder.getSQL_getToBeProcessedFromProcessingTable(study))
        return [self.getObjectFromTuple(row) for row in rows]
示例3: __init__
# 需要导入模块: from Utils.DbUtils import DbUtils [as 别名]
# 或者: from Utils.DbUtils.DbUtils import executeAllResults [as 别名]
class Sorting:
    """Data-access wrapper for the `Sorting` table (downloaded-scan bookkeeping)."""

    def __init__(self):
        self.tableName = 'Sorting'
        self.DBClient = DbUtils()
        self.sqlBuilder = SQLBuilder()

    def getObjectFromTuple(self, tuple):
        """Convert a raw `Sorting` row into a SortingObject."""
        fields = {
            'record_id': tuple[0],
            'study': tuple[1],
            'rid': tuple[2],
            'scan_type': tuple[3],
            'scan_date': tuple[4].strftime("%Y-%m-%d"),
            'scan_time': str(tuple[5]),
            's_identifier': tuple[6],
            'i_identifier': tuple[7],
            'file_type': tuple[8],
            'download_folder': tuple[9],
            'raw_folder': tuple[10],
            'moved': tuple[11],
        }
        return SortingObject(fields)

    def insertToTable(self, objList):
        """Insert each sorting object through the SQL builder."""
        for entry in objList:
            insertSql = self.sqlBuilder.getSQL_AddNewEntryToSortingTable(entry.sqlInsert())
            self.DBClient.executeNoResult(insertSql)

    def getUnmovedFilesPerStudy(self, study):
        """Return SortingObjects not yet moved, restricted to the study's known scan types."""
        knownScanTypes = tuple(sc.ProcessingModalityAndPipelineTypePerStudy[study].keys())
        rows = self.DBClient.executeAllResults(
            self.sqlBuilder.getSQL_getUnmovedFilesFromSortingTable(study, knownScanTypes))
        return [self.getObjectFromTuple(row) for row in rows]

    def setMovedTrue(self, sortingObj):
        """Flag a record as moved and persist it."""
        sortingObj.moved = 1
        self.saveObj(sortingObj)

    def saveObj(self, sortingObj):
        """Persist the object via an UPDATE built by the SQL builder."""
        self.DBClient.executeNoResult(self.sqlBuilder.getSQL_saveObjSortingTable(sortingObj))
示例4: __init__
# 需要导入模块: from Utils.DbUtils import DbUtils [as 别名]
# 或者: from Utils.DbUtils.DbUtils import executeAllResults [as 别名]
class QCHandler:
    """Registers QC requests and promotes finished, QC-flagged pipeline jobs."""

    def __init__(self):
        self.DBClient = DbUtils()

    def requestQC(self, study, modal_table, modal_tableId, qcField, qctype, qcFolder):
        """Queue a QC request row (INSERT IGNORE keeps repeats harmless)."""
        qcsql = "INSERT IGNORE INTO QC VALUES (Null, '{0}', '{1}', '{2}', '{3}', '{4}','{5}' , 0, 0, 0, 0, Null)".format(
            study.upper(), modal_table, modal_tableId, qcField, qctype, qcFolder)
        self.DBClient.executeNoResult(qcsql)

    def checkQCJobs(self, study, modality):
        """Mark finished, QC'd pipeline jobs as PROCESSED/QCPASSED in Processing.

        Returns 0 when there are no jobs to promote; otherwise returns None
        (return-value asymmetry preserved from the original implementation).
        """
        pendingSql = "SELECT * FROM {0}_{1}_Pipeline WHERE QC = 1 AND FINISHED = 1".format(study, modality)
        pending = self.DBClient.executeAllResults(pendingSql)
        if not pending:
            return 0
        for job in pending:
            proc_id = job[1]
            self.DBClient.executeNoResult(
                "UPDATE Processing SET PROCESSED = 1, QCPASSED = 1 WHERE RECORD_ID = {0}".format(proc_id))
示例5: DbUtils
# 需要导入模块: from Utils.DbUtils import DbUtils [as 别名]
# 或者: from Utils.DbUtils.DbUtils import executeAllResults [as 别名]
__author__ = 'sulantha'
import glob, subprocess, re
from Utils.DbUtils import DbUtils
import os
from distutils import file_util, dir_util
import shutil

# One-off cleanup: for each image identifier, delete its on-disk data folder
# and purge the matching Sorting / Conversion (and Processing) rows.
DBClient = DbUtils()
IID_list = ['I546612', 'I620366', 'I535767', 'I560359', 'I581738']
for iid in IID_list:
    getDataFolderSQL = "SELECT RAW_FOLDER FROM Sorting WHERE I_IDENTIFIER = '{0}'".format(iid)
    res = DBClient.executeAllResults(getDataFolderSQL)
    if len(res) == 0:
        pass  # no Sorting record for this IID; nothing to clean
    else:
        rawFolder = res[0][0]
        # The raw folder's parent is the per-scan data folder; remove it entirely.
        dataFolder = os.path.abspath(os.path.join(rawFolder, '../'))
        shutil.rmtree(dataFolder)
        print(dataFolder)
        delsql = "DELETE FROM Sorting WHERE I_IDENTIFIER = '{0}'".format(iid)
        DBClient.executeNoResult(delsql)
        delsql = "DELETE FROM Conversion WHERE I_IDENTIFIER = '{0}'".format(iid)
        DBClient.executeNoResult(delsql)
        getProSQL = "SELECT RECORD_ID, STUDY, MODALITY FROM Processing WHERE I_IDENTIFIER = '{0}'".format(iid)
        res2 = DBClient.executeAllResults(getProSQL)
        if len(res2) == 0:
            pass
        else:
            P_ID = res2[0][0]
            # NOTE(review): excerpt ends here — P_ID is presumably used by
            # Processing-table cleanup code truncated from this snippet.
示例6: kill
# 需要导入模块: from Utils.DbUtils import DbUtils [as 别名]
# 或者: from Utils.DbUtils.DbUtils import executeAllResults [as 别名]
# NOTE(review): orphaned fragment — `kill`, `proc2`, `args`, `DBClient`,
# `getpass`, `hashlib` and `sys` are defined in code truncated from this excerpt.
kill(proc2.pid)
if __name__ == '__main__':
    # Put this process in its own process group so children can be signalled together.
    os.setpgrp()
    try:
        # Admin-only user-creation path: no study/type/user args, but --createUser given.
        if args.study is None and args.type is None and args.user is None and args.createUser is not None:
            ## Create User
            user = input('Admin username : ')
            passwd = getpass.getpass('Admin Password : ')
            # Authenticate the admin by SHA-256 password hash and LEVEL = 9.
            hash_object = hashlib.sha256(passwd.encode('utf-8'))
            hex_dig = hash_object.hexdigest()
            sql = "SELECT * FROM Auth WHERE USER = '{0}' AND PASS = '{1}' AND LEVEL = 9".format(user, hex_dig)
            res = DBClient.executeAllResults(sql)
            if len(res) > 0:
                # Refuse to overwrite an existing account.
                if len(DBClient.executeAllResults("SELECT * FROM Auth WHERE USER = '{0}'".format(args.createUser))) > 0:
                    print('User already exists. ')
                    sys.exit(0)
                newpass1 = getpass.getpass('Enter password for {0} : '.format(args.createUser))
                newpass2 = getpass.getpass('Re-enter password : '.format(args.createUser))
                if newpass1 == newpass2 :
                    # Store the SHA-256 hex digest, never the plain password.
                    hash_object = hashlib.sha256(newpass1.encode('utf-8'))
                    passHex = hash_object.hexdigest()
                    sqlInsert = "INSERT INTO Auth VALUES (Null, '{0}', 1, '{1}')".format(args.createUser, passHex)
                    DBClient.executeNoResult(sqlInsert)
                    # NOTE(review): fragment truncated — the `try` above has no
                    # matching except/finally within this excerpt.
示例7: __init__
# 需要导入模块: from Utils.DbUtils import DbUtils [as 别名]
# 或者: from Utils.DbUtils.DbUtils import executeAllResults [as 别名]
class Niak:
def __init__(self):
    # DB client used for Conversion-table lookups.
    self.DBClient = DbUtils()
def getScanType(self, processingItemObj):
    """Look up the scan type recorded in the Conversion table for this item."""
    query = ("SELECT SCAN_TYPE FROM Conversion WHERE STUDY = '{0}' AND RID = '{1}' "
             "AND SCAN_DATE = '{2}' AND S_IDENTIFIER = '{3}' "
             "AND I_IDENTIFIER = '{4}'").format(processingItemObj.study,
                                                processingItemObj.subject_rid,
                                                processingItemObj.scan_date,
                                                processingItemObj.s_identifier,
                                                processingItemObj.i_identifier)
    rows = self.DBClient.executeAllResults(query)
    # First column of the first row is the scan type.
    return rows[0][0]
def process(self, processingItemObj):
    """Launch NIAK fMRI preprocessing for one processing item.

    Returns 0 when the MATLAB template cannot be prepared.
    """
    try:
        matlabScript, nativeFileName, niakFolder = self.readTemplateFile(processingItemObj)
        PipelineLogger.log('manager', 'info', 'NIAK starting for {0}'.format(nativeFileName))
    except:
        # NOTE(review): bare except — also catches the `return 0` path of
        # readTemplateFile (unpacking an int raises TypeError), so the
        # failure reason is silently lost.
        return 0
    # Delete PIPE.lock file, if is exists
    if os.path.isfile("%s/preprocessing/logs/PIPE.lock" % niakFolder):
        os.remove("%s/preprocessing/logs/PIPE.lock" % niakFolder)
    success = self.executeScript(processingItemObj, matlabScript, niakFolder)
    #### After, if Niak succeeded, concatenate all runs together using combiningRuns
    if False:  # NOTE(review): deliberately disabled — dead code below.
        if success:
            self.combiningRuns(processingItemObj)
        else:
            PipelineLogger.log()  # NOTE(review): would raise TypeError if ever reached (no args).
    #### Report error
def readTemplateFile(self, processingItemObj):
    """Build the NIAK preprocessing MATLAB script from the bundled template.

    Returns (scriptText, lastFmriFileName, niakFolder) on success, or 0 when
    no corresponding processed MRI exists (inconsistent return type preserved
    from the original).
    """
    niakTemplateFile = os.path.dirname(__file__) + '/MatlabScripts/niakPreprocessingTemplate.m'
    niakFolder = '{0}/niak'.format(processingItemObj.root_folder)
    logDir = '{0}/logs'.format(processingItemObj.root_folder)
    # Get the corresponding subject-space MRI path
    correspondingMRI = self.findCorrespondingMRI(processingItemObj)
    if not correspondingMRI:  # If there is no corresponding MRI file
        return 0
    else:
        anat = correspondingMRI + '/civet/native/*t1.mnc'  # glob pattern for the native-space T1
        anat = glob.glob(anat)[0]
        # Get all subjects
        patientInfo = "files_in.subject1.anat = '%s';" % (anat)
        # One fMRI entry per run in the converted folder; the run index is
        # parsed from the file name ('..._run<N>.mnc').
        for fmri in glob.glob(processingItemObj.converted_folder + '/*.mnc*'):
            iteration = fmri[fmri.rindex('_run') + 4 : fmri.rindex('.mnc')]
            patientInfo = patientInfo + "\nfiles_in.subject1.fmri.session1{%s} = '%s'" % (iteration, fmri)
        # Read templateFileWithInformation
        with open(niakTemplateFile, 'r') as templateFile:
            templateFileWithInformation = templateFile.read()
            templateFile.close()  # redundant inside `with`; kept as-is
        # Replacing template placeholders with information
        replacing_dict = {'%{patient_information}': patientInfo,
                          '%{opt.folder_out}': niakFolder,
                          '%{niak_location}': config.niak_location,
                          '%{nu_correct}': processingItemObj.parameters['nu_correct']
                          }
        templateFileWithInformation = self.replaceString(templateFileWithInformation, replacing_dict)
        # NOTE(review): `fmri` is the last file of the loop above; raises
        # NameError if the glob matched nothing — confirm intended.
        return templateFileWithInformation, fmri, niakFolder
def findCorrespondingMRI(self, processingItemObj):
    """Locate the processed T1 MRI matching this fMRI item.

    Returns the processed-T1 result, or 0 when there is no match or the
    matching T1 has not been processed yet.
    """
    # Find Matching T1
    matching_t1 = ADNI_T1_Fmri_Helper().getMatchingT1(processingItemObj)
    if not matching_t1:
        return 0
    # Find out whether T1 has been processed
    processed = ADNI_T1_Fmri_Helper().checkProcessed(matching_t1)
    if processed:
        return processed
    PipelineLogger.log('root', 'error', 'FMRI cannot be processed due to matching T1 not being processed.')
    return 0
def replaceString(self, templateText, replacing_dict):
    """Return templateText with every key of replacing_dict substituted by its value."""
    result = templateText
    for placeholder, replacement in replacing_dict.items():
        result = result.replace(placeholder, replacement)
    return result
def createMatlabFile(self, matlabScript, niakFolder):
    """Write the MATLAB preprocessing script into niakFolder and return its path.

    Creates niakFolder when missing; an existing script file is overwritten.
    """
    script_path = niakFolder + '/preprocessing_script.m'
    if not os.path.exists(niakFolder):
        os.makedirs(niakFolder)
    with open(script_path, 'w') as script_file:
        script_file.write(matlabScript)
    return script_path
def executeScript(self, processingItemObj, matlabScript, niakFolder):
#.........这里部分代码省略.........
示例8: DbUtils
# 需要导入模块: from Utils.DbUtils import DbUtils [as 别名]
# 或者: from Utils.DbUtils.DbUtils import executeAllResults [as 别名]
__author__ = 'sulantha'
from Utils.DbUtils import DbUtils

# Purge every non-PET (anything but AV45/FDG) record for the listed subjects
# from Sorting, Conversion, the T1 pipeline table and Processing.
DBClient = DbUtils()
RIDList = ['4225','4746','4799','4136','4142','4192','4713','4960','4387','0021','4827','4579','4580','4616','4668','4696','4809','4549','4680','5012','5019','4674','4757','4385','4721','4947','4714','4715','4736','4706','4720','4661','4728','4767','4739','4089','4379','0382','4732','0230','4586','4653','4671','4742','4369','4589','4730','4676','4689','4722','4723','4587','4631','4632','4672','4678','4756','4711','4764']
for subject_rid in RIDList:
    DBClient.executeNoResult(
        "DELETE FROM Sorting WHERE RID = {0} AND SCAN_TYPE NOT IN ('AV45', 'FDG')".format(subject_rid))
    DBClient.executeNoResult(
        "DELETE FROM Conversion WHERE RID = {0} AND SCAN_TYPE NOT IN ('AV45', 'FDG')".format(subject_rid))
    pipelineRecords = DBClient.executeAllResults(
        "SELECT RECORD_ID FROM Processing WHERE RID = {0} AND MODALITY NOT IN ('AV45', 'FDG')".format(subject_rid))
    # Remove dependent pipeline rows before their Processing parents.
    for record in pipelineRecords:
        DBClient.executeNoResult(
            "DELETE FROM ADNI_T1_Pipeline WHERE PROCESSING_TID = {0}".format(record[0]))
    DBClient.executeNoResult(
        "DELETE FROM Processing WHERE RID = {0} AND MODALITY NOT IN ('AV45', 'FDG')".format(subject_rid))
示例9: __init__
# 需要导入模块: from Utils.DbUtils import DbUtils [as 别名]
# 或者: from Utils.DbUtils.DbUtils import executeAllResults [as 别名]
class PipelineHandler:
    """Routes Processing rows to the matching study/version/modality pipeline
    implementation and reconciles externally-run jobs (e.g. CIVET)."""
    def __init__(self):
        # study -> version -> modality -> pipeline implementation.
        # NOTE(review): V2 reuses the V1 T1/FMRI implementations — confirm intentional.
        self.processingPPDict = {'ADNI':{'V1':{'T1':ADNI_V1_T1(), 'FMRI':ADNI_V1_FMRI(), 'AV45':ADNI_V1_AV45(), 'FDG':ADNI_V1_FDG(), 'AV1451': ADNI_V1_AV1451()},
                                         'V2':{'T1':ADNI_V1_T1(), 'FMRI':ADNI_V1_FMRI(), 'AV45':ADNI_V2_AV45(), 'FDG':ADNI_V2_FDG(), 'AV1451': ADNI_V2_AV1451()}}}
        self.DBClient = DbUtils()
        self.QCH = QCHandler()
    def checkExternalJobs(self, study, modality):
        """Reconcile finished external jobs recorded in `externalWaitingJobs`.

        Job IDs appear to encode <study>_<modality>_<tableID>_<subjectScanID>_<jobType>
        (split by '_' below — confirm against the job-submission side). For a
        finished CIVET run: copy its output into the scan's civet folder, flag
        the pipeline row, queue a QC request, then drop the waiting-job row.
        """
        getExtJobSql = "SELECT * FROM externalWaitingJobs WHERE JOB_ID LIKE '{0}_{1}_%'".format(study, modality)
        extJobs = self.DBClient.executeAllResults(getExtJobSql)
        for job in extJobs:
            jobType = job[0].split('_')[-1]
            reportTable = job[1]
            tableID = job[0].split('_')[2]
            reportField = job[2]
            subjectScanID = job[0].split('_')[3]
            success = 0
            if jobType == 'CIVETRUN':
                # Output present in the CIVET download area => job finished.
                if glob.glob('{0}/{1}_{2}_*'.format(PipelineConfig.T1TempDirForCIVETDownload, study, subjectScanID)):
                    getProccessRecSql = "SELECT * FROM Processing WHERE RECORD_ID IN (SELECT PROCESSING_TID FROM {0}_T1_Pipeline WHERE RECORD_ID = {1})".format(study, tableID)
                    processingEntry = self.DBClient.executeAllResults(getProccessRecSql)[0]
                    civetFolder = '{0}/civet'.format(processingEntry[8])
                    # Replace any stale copy of the CIVET output.
                    if os.path.exists(civetFolder):
                        shutil.rmtree(civetFolder)
                    try:
                        PipelineLogger.log('manager', 'info', 'Copying - {0} -> {1}'.format(glob.glob('{0}/{1}_{2}_*'.format(PipelineConfig.T1TempDirForCIVETDownload, study, subjectScanID))[0], civetFolder))
                        dir_util.copy_tree(glob.glob('{0}/{1}_{2}_*'.format(PipelineConfig.T1TempDirForCIVETDownload, study, subjectScanID))[0], civetFolder)
                        success = 1
                    except:
                        # Copy failed; leave the waiting job for a later pass.
                        success = 0
                else:
                    continue  # job not finished yet
            else:
                PipelineLogger.log('manager', 'error', 'Unknown external job type - {}'.format(jobType))
            if success:
                updateSQL = "UPDATE {0} SET {1} = 1 WHERE RECORD_ID = {2}".format(reportTable, reportField, tableID)
                self.DBClient.executeNoResult(updateSQL)
                if jobType == 'CIVETRUN':
                    finishSQL = "UPDATE {0} SET FINISHED = 1 WHERE RECORD_ID = {1}".format(reportTable, tableID)
                    self.DBClient.executeNoResult(finishSQL)
                    modal_table = reportTable
                    modal_tableId = tableID
                    qcField = 'QC'
                    qctype = 'civet'
                    qcFolder = civetFolder
                    self.QCH.requestQC(study, modal_table, modal_tableId, qcField, qctype, qcFolder)
                rmSql = "DELETE FROM externalWaitingJobs WHERE JOB_ID LIKE '{0}_{1}_{2}_{3}_%'".format(study, modality, tableID, subjectScanID)
                self.DBClient.executeNoResult(rmSql)
    def process(self, study, modality):
        """Run the pipeline for every unfinished, unskipped item of the given
        study/modality. The environment is prepared for the MINC toolkit first."""
        os.environ['PATH'] = ':'.join(libpath.PATH)
        os.environ['LD_LIBRARY_PATH'] = ':'.join(libpath.LD_LIBRARY_PATH)
        os.environ['LD_LIBRARYN32_PATH'] = ':'.join(libpath.LD_LIBRARYN32_PATH)
        os.environ['PERL5LIB'] = ':'.join(libpath.PERL5LIB)
        os.environ['MNI_DATAPATH'] = ':'.join(libpath.MNI_DATAPATH)
        os.environ['ROOT'] = ';'.join(libpath.ROOT)
        os.environ['MINC_TOOLKIT_VERSION'] = libpath.MINC_TOOLKIT_VERSION
        os.environ['MINC_COMPRESS'] = libpath.MINC_COMPRESS
        os.environ['MINC_FORCE_V2'] = libpath.MINC_FORCE_V2
        toProcessinModalityPerStudy = self.DBClient.executeAllResults("SELECT * FROM Processing INNER JOIN (SELECT * FROM {0}_{1}_Pipeline WHERE NOT (FINISHED OR SKIP)) as TMP ON Processing.RECORD_ID=TMP.PROCESSING_TID".format(study, modality))
        for processingItem in toProcessinModalityPerStudy:
            version = processingItem[10]  # VERSION column of the Processing row
            # Calling on the process .section of given studies and modalities
            self.processingPPDict[study][version][modality].process(processingItem)
        return 0
    def addToPipelineTable(self, processingObj):
        """Insert the modality-specific pipeline row for a Processing record
        (INSERT IGNORE keeps this idempotent)."""
        study = processingObj.study
        version = processingObj.version
        modality = processingObj.modality
        r_id = processingObj.record_id
        # One INSERT template per modality; only the matching one is executed.
        addToTableDict = dict(T1="INSERT IGNORE INTO {0}_T1_Pipeline VALUES (NULL, {1}, \"{2}\", 0, 0, 0, 0, 0, 0, 0, 0, 0, NULL, NULL)".format(study, r_id, PipelineConfig.defaultT1config),
                              AV45="INSERT IGNORE INTO {0}_AV45_Pipeline VALUES (NULL, {1}, \"{2}\", '{3}', 0, 0, 0, NULL, NULL)".format(study, r_id, PipelineConfig.defaultAV45config, ''),
                              AV1451="INSERT IGNORE INTO {0}_AV1451_Pipeline VALUES (NULL, {1}, \"{2}\", '{3}', 0, 0, 0, NULL, NULL)".format(study, r_id, PipelineConfig.defaultAV1451config, ''),
                              FDG="INSERT IGNORE INTO {0}_FDG_Pipeline VALUES (NULL, {1}, \"{2}\", '{3}', 0, 0, 0, NULL, NULL)".format(study, r_id, PipelineConfig.defaultFDGconfig, ''),
                              FMRI="INSERT IGNORE INTO {0}_FMRI_Pipeline VALUES (NULL, {1}, \"{2}\", '{3}', 0, 0, 0, NULL, NULL)".format(study, r_id, PipelineConfig.defaultFMRIconfig, 'NIAK_STH_COMESHERE'))
        self.DBClient.executeNoResult(addToTableDict[modality])
示例10: DbUtils
# 需要导入模块: from Utils.DbUtils import DbUtils [as 别名]
# 或者: from Utils.DbUtils.DbUtils import executeAllResults [as 别名]
__author__ = 'sulantha'
import glob, subprocess, re
from Utils.DbUtils import DbUtils
import os
from distutils import file_util, dir_util
import shutil

# Scan all pending AV45 coregistration rows and report which manual XFM
# files already exist on disk (counts done vs. still missing).
DBClient = DbUtils()
getAllTodoSQL = "SELECT XFM_NAME FROM Coregistration WHERE END = 0 AND SKIP = 0 AND START = 0 AND PET_SCANTYPE = 'AV45'"
res = DBClient.executeAllResults(getAllTodoSQL)
totalC = 0
done_c = 0
for row in res:
    # Unique id is everything after the third underscore of the XFM name.
    xfm_id = row[0].split('_', 3)[-1]
    print(xfm_id)
    matches = glob.glob('/data/data03/MANUAL_XFM/{0}.xfm'.format(row[0]))
    if matches:
        done_c += 1
        print('Already done. - {0}'.format(xfm_id))
        markDoneSQL = "UPDATE Coregistration SET START=1, END=1, USER='admin' WHERE XFM_NAME LIKE '%{0}'".format(xfm_id)
        print(markDoneSQL)
        # Deliberately disabled in the original (dry run):
        # DBClient.executeNoResult(markDoneSQL)
    else:
        print('Not done. - {0}'.format(xfm_id))
        totalC += 1
示例11: DbUtils
# 需要导入模块: from Utils.DbUtils import DbUtils [as 别名]
# 或者: from Utils.DbUtils.DbUtils import executeAllResults [as 别名]
# Fragment: for each AV1451 (V2) scan listed in the CSV, find the
# closest-in-time processed & QC-passed AV45 and FDG scans for the subject.
DBClient = DbUtils()
outLines = []
with open('/home/sulantha/Downloads/Av1451_V2.csv', 'r') as file:
    next(file)  # skip the CSV header row
    for line in file:
        row = line.split(',')
        rid = row[2]
        date = row[4].strip()
        dateT = datetime.strptime(date, '%Y-%m-%d')
        dateS = dateT.strftime('%Y-%m-%d')
        # Placeholder rows used when no matching scan exists.
        closestAV= ['']*20
        closestFD = ['']*20
        findAV45 = "SELECT * FROM Processing WHERE RID = {0} AND MODALITY = 'AV45' AND PROCESSED = 1 AND QCPASSED = 1 AND VERSION = 'V2'".format(
            rid)
        resvav45 = DBClient.executeAllResults(findAV45)
        if len(resvav45) > 0:
            # Sort candidates by absolute date distance from the AV1451 scan
            # (column 4 is the scan date).
            sortedRecs = sorted(resvav45,
                                key=lambda x: abs(datetime.strptime(x[4].strftime('%Y-%m-%d'), '%Y-%m-%d') - dateT))
            closestDate = [k for k, g in itertools.groupby(sortedRecs, key=lambda x: abs(
                datetime.strptime(x[4].strftime('%Y-%m-%d'), '%Y-%m-%d') - dateT))][0]
            closestMatchedRecs = [list(g) for k, g in itertools.groupby(sortedRecs, key=lambda x: abs(
                datetime.strptime(x[4].strftime('%Y-%m-%d'), '%Y-%m-%d') - dateT))][0]
            closestAV = closestMatchedRecs[0]
        findFDG = "SELECT * FROM Processing WHERE RID = {0} AND MODALITY = 'FDG' AND PROCESSED = 1 AND QCPASSED = 1 AND VERSION = 'V2'".format(
            rid)
        resvFDG = DBClient.executeAllResults(findFDG)
        if len(resvFDG) > 0:
            sortedRecs = sorted(resvFDG,
                                key=lambda x: abs(datetime.strptime(x[4].strftime('%Y-%m-%d'), '%Y-%m-%d') - dateT))
            # NOTE(review): excerpt is truncated mid-statement below.
            closestDate = [k for k, g in itertools.groupby(sortedRecs, key=lambda x: abs(
示例12: DbUtils
# 需要导入模块: from Utils.DbUtils import DbUtils [as 别名]
# 或者: from Utils.DbUtils.DbUtils import executeAllResults [as 别名]
__author__ = 'Sulantha'
from Utils.DbUtils import DbUtils
import glob, os, sys, fileinput
if __name__ == '__main__':
    # Rewrite CIVET nonlinear .xfm files so Displacement_Volume references the
    # grid .mnc by basename instead of a (possibly stale) absolute path.
    DBClient = DbUtils()
    sql1 = "SELECT * FROM Processing WHERE PROCESSED = 1 AND MODALITY ='T1'"
    res = DBClient.executeAllResults(sql1)
    for result in res:
        proc_id = result[0]
        T1Path = result[8]  # root folder of the processed T1
        try:
            civet_nl_xfm_name = '{0}/civet/transforms/nonlinear/*nlfit_It.xfm'.format(T1Path)
            civet_nl_xfm_file = glob.glob(civet_nl_xfm_name)[0]
            civet_nl_mnc_name = '{0}/civet/transforms/nonlinear/*nlfit_It_grid_0.mnc'.format(T1Path)
            civet_nl_mnc_file = glob.glob(civet_nl_mnc_name)[0]
            civet_nl_mnc_name_base = os.path.basename(civet_nl_mnc_file)
            # In-place rewrite: stdout is redirected into the file by fileinput.
            for line in fileinput.input(civet_nl_xfm_file, inplace=True):
                if 'Displacement_Volume' in line:
                    # NOTE(review): the replacement has no trailing newline, so
                    # the next line of the file is joined onto it — confirm intended.
                    line = 'Displacement_Volume = {0};'.format(civet_nl_mnc_name_base)
                sys.stdout.write(line)
        except:
            # Expected files missing (glob()[0] IndexError): flag the record as
            # failing QC instead of crashing the sweep.
            s = "UPDATE Processing SET QCPASSED = 0 WHERE RECORD_ID = {0}".format(proc_id)
            DBClient.executeNoResult(s)
            print('Files not found - {0} - {1}'.format(proc_id, T1Path))
示例13: DbUtils
# 需要导入模块: from Utils.DbUtils import DbUtils [as 别名]
# 或者: from Utils.DbUtils.DbUtils import executeAllResults [as 别名]
__author__ = 'sulantha'
import datetime
from Utils.DbUtils import DbUtils

# Re-enable (SKIP = 0) the AV45 Processing rows listed in a CSV of
# (RID, scan date); prints a marker line for rows not found in the DB.
DBClient = DbUtils()
with open('/data/data03/sulantha/Downloads/av45_list.csv', 'r') as file:
    next(file)  # skip the CSV header row
    for line in file:
        row = line.split(',')
        rid = row[0]
        date = row[1].strip()
        dateT = datetime.datetime.strptime(date, '%m/%d/%Y')
        dateS = dateT.strftime('%Y-%m-%d')
        findSQL = "SELECT * FROM Processing WHERE RID = {0} AND MODALITY = 'AV45' AND SCAN_DATE = '{1}'".format(rid, dateS)
        res = DBClient.executeAllResults(findSQL)
        # BUG FIX: the original tested `len(res) is 0` — identity comparison
        # against an int literal (SyntaxWarning, implementation-dependent).
        # Use equality, and drop the redundant nested conditional expression.
        if len(res) == 0:
            print('{0}-{1} {2}'.format(rid, len(res), '############'))
        processingSQL = "UPDATE Processing SET SKIP = 0 WHERE RID = {0} AND MODALITY = 'AV45' AND SCAN_DATE = '{1}'".format(rid, dateS)
        DBClient.executeNoResult(processingSQL)
示例14: DbUtils
# 需要导入模块: from Utils.DbUtils import DbUtils [as 别名]
# 或者: from Utils.DbUtils.DbUtils import executeAllResults [as 别名]
__author__ = 'sulantha'
import datetime
from Utils.DbUtils import DbUtils

# Cross-check a CSV of AV45 scans (RID, scan date) against ADNI's
# PET_META_LIST and report scans missing from the local Conversion table.
csvFile = '/data/data03/sulantha/Downloads/av45_list.csv'
MatchDBClient = DbUtils(database='Study_Data.ADNI')
DBClient = DbUtils()
with open(csvFile, 'r') as csv:
    next(csv)  # skip the CSV header row
    for line in csv:
        row = line.split(',')
        rid = row[0].strip()
        date = row[1].strip()
        dateT = datetime.datetime.strptime(date, '%m/%d/%Y')
        dateS = dateT.strftime('%Y-%m-%d')
        sql = "SELECT DISTINCT subject, visit, seriesid, imageid FROM PET_META_LIST WHERE subject like '%_%_{0}' and scandate = '{1}' and origproc = 'Original'".format(rid, dateS)
        result = MatchDBClient.executeAllResults(sql)
        # ROBUSTNESS FIX: the original indexed result[0] unconditionally and
        # crashed with IndexError when the meta list had no match.
        if not result:
            print('########################### No PET_META_LIST match - {0} - {1}'.format(rid, date))
            continue
        checkDBSQL = "SELECT * FROM Conversion WHERE RID = '{0}' AND S_IDENTIFIER = '{1}' AND I_IDENTIFIER = '{2}'".format(rid, 'S{0}'.format(result[0][2]), 'I{0}'.format(result[0][3]))
        resultN = DBClient.executeAllResults(checkDBSQL)
        if len(resultN) == 0:
            print('########################### Not in DB - {0} - {1}'.format(rid, date))
示例15: __init__
# 需要导入模块: from Utils.DbUtils import DbUtils [as 别名]
# 或者: from Utils.DbUtils.DbUtils import executeAllResults [as 别名]
class ADNI_V2_AV45:
def __init__(self):
    # Pipeline DB (Processing/Conversion/pipeline tables).
    self.DBClient = DbUtils()
    # ADNI metadata DB used for scan matching.
    self.MatchDBClient = DbUtils(database=pc.ADNI_dataMatchDBName)
    # Helper for PET/T1 coregistration bookkeeping.
    self.PETHelper = PETHelper()
def process(self, processingItem):
    """Entry point: validate T1 prerequisites, resolve manual coregistration
    if requested, then launch AV45 PET processing.

    Returns 0 when processing cannot proceed yet (no matching/processed T1,
    or a manual XFM is still pending).
    """
    processingItemObj = ProcessingItemObj(processingItem)
    # PET requires a matching T1 scan...
    matching_t1 = ADNI_T1_Helper().getMatchingT1(processingItemObj)
    if not matching_t1:
        PipelineLogger.log('root', 'error', 'PET cannot be processed no matching T1 found. - {0} - {1} - {2}.'.format(processingItemObj.subject_rid, processingItemObj.modality, processingItemObj.scan_date))
        return 0
    # ...and that T1 must already be processed.
    processed = ADNI_T1_Helper().checkProcessed(matching_t1)
    if not processed:
        PipelineLogger.log('root', 'error', 'PET cannot be processed due to matching T1 not being processed - {0}'.format(matching_t1))
        return 0
    else:
        PipelineLogger.log('root', 'INFO', '+++++++++ PET ready to be processed. Will check for initial xfm. - {0} - {1}'.format(processingItemObj.subject_rid, processingItemObj.scan_date))
        if processingItemObj.manual_xfm == 'Req_man_reg':
            # Manual registration requested: reuse an existing XFM when one exists.
            coregDone = self.PETHelper.checkIfAlreadyDone(processingItemObj, matching_t1)
            if coregDone:
                manualXFM = coregDone
                setPPTableSQL = "UPDATE {0}_{1}_Pipeline SET MANUAL_XFM = '{2}' WHERE RECORD_ID = {3}".format(processingItemObj.study, processingItemObj.modality, manualXFM, processingItemObj.table_id)
                self.DBClient.executeNoResult(setPPTableSQL)
                self.processPET(processingItemObj, processed)
            else:
                # No XFM yet: queue a coregistration request and bail out.
                self.PETHelper.requestCoreg(processingItemObj, matching_t1)
                PipelineLogger.log('root', 'INFO', 'Manual XFM was not found. Request to create one may have added. - {0} - {1}'.format(processingItemObj.subject_rid, processingItemObj.scan_date))
                return 0
        else:
            self.processPET(processingItemObj, processed)
def getScanType(self, processingItemObj):
    """Look up the scan type recorded in the Conversion table for this item.

    (Same query as Niak.getScanType elsewhere in this file.)
    """
    query = ("SELECT SCAN_TYPE FROM Conversion WHERE STUDY = '{0}' AND RID = '{1}' "
             "AND SCAN_DATE = '{2}' AND S_IDENTIFIER = '{3}' "
             "AND I_IDENTIFIER = '{4}'").format(processingItemObj.study,
                                                processingItemObj.subject_rid,
                                                processingItemObj.scan_date,
                                                processingItemObj.s_identifier,
                                                processingItemObj.i_identifier)
    rows = self.DBClient.executeAllResults(query)
    # First column of the first row is the scan type.
    return rows[0][0]
def processPET(self, processingItemObj, matchT1Path):
    """Run the external ADNI_V2_AV45_Process shell pipeline for one PET scan.

    Rebuilds the processed folder from scratch, launches the pipeline via
    bash and registers a qsub follow-up job. Returns 1 on submission,
    0 on setup failure.
    """
    # Converted PET file: <conv>/<study>_<rid><yyyymmdd><sid><iid>_<scantype>.mnc
    petFileName = '{0}/{1}_{2}{3}{4}{5}_{6}.mnc'.format(processingItemObj.converted_folder, processingItemObj.study,
                                                        processingItemObj.subject_rid, processingItemObj.scan_date.replace('-', ''),
                                                        processingItemObj.s_identifier, processingItemObj.i_identifier,
                                                        self.getScanType(processingItemObj))
    processedFolder = '{0}/processed'.format(processingItemObj.root_folder)
    logDir = '{0}/logs'.format(processingItemObj.root_folder)
    PipelineLogger.log('manager', 'info', 'PET processing starting for {0}'.format(petFileName))
    try:
        distutils.dir_util.mkpath(logDir)
    except Exception as e:
        PipelineLogger.log('manager', 'error', 'Error in creating log folder \n {0}'.format(e))
        return 0
    # Unique job id built from rid/date/identifiers (shadows the `id` builtin; kept as-is).
    id = '{0}{1}{2}{3}'.format(processingItemObj.subject_rid, processingItemObj.scan_date.replace('-', ''), processingItemObj.s_identifier, processingItemObj.i_identifier)
    # `parameters` is a Python-literal dict string; render it as a bash
    # associative-array literal: (["k"]="v" ...).
    paramStrd = ast.literal_eval(processingItemObj.parameters)
    paramStrt = ' '.join(['[\"{0}\"]=\"{1}\"'.format(k, v) for k,v in paramStrd.items()])
    paramStr = '({0})'.format(paramStrt)
    petCMD = "source /opt/minc-toolkit/minc-toolkit-config.sh; Pipelines/ADNI_AV45/ADNI_V2_AV45_Process {0} {1} {2} {3} {4} {5} '{6}' {7} {8}".format(id, petFileName, processedFolder, matchT1Path, 'auto' if processingItemObj.manual_xfm == '' else processingItemObj.manual_xfm, logDir, paramStr,socket.gethostname(), 50500)
    # Remove any previous output (rename first, then delete); failure here is
    # non-fatal and only logged (e.g. folder did not exist yet).
    try:
        processedFolder_del = '{0}/processed_del'.format(processingItemObj.root_folder)
        os.rename(processedFolder, processedFolder_del)
        shutil.rmtree(processedFolder_del)
    except Exception as e:
        PipelineLogger.log('manager', 'error', 'Error in deleting old processing folder. \n {0}'.format(e))
    try:
        distutils.dir_util.mkpath(processedFolder)
    except Exception as e:
        PipelineLogger.log('manager', 'error', 'Error in creating processing folder. \n {0}'.format(e))
        return 0
    PipelineLogger.log('manager', 'debug', 'Command : {0}'.format(petCMD))
    p = subprocess.Popen(petCMD, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, executable='/bin/bash')
    out, err = p.communicate()
    PipelineLogger.log('manager', 'debug', 'Process Log Output : \n{0}'.format(out))
    PipelineLogger.log('manager', 'debug', 'Process Log Err : \n{0}'.format(err))
    # Track the submitted job with a 2-hour wall-time limit.
    QSubJobHandler.submittedJobs[id] = QSubJob(id, '02:00:00', processingItemObj, 'av45')
    return 1