This article collects typical usage examples of the Python class Utils.DbUtils.DbUtils. If you are wondering what the DbUtils class does and how it is used in practice, the curated class examples below may help.
A total of 15 code examples of the DbUtils class are shown, sorted by popularity by default.
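The examples only show call sites; none of them include the DbUtils class itself. For orientation, the interface they all rely on can be summarized roughly as below. This is a minimal sketch inferred from the calls in the examples (a constructor taking an optional database argument, executeNoResult, executeAllResults, and close); the mysql.connector backend and every connection parameter are assumptions for illustration, not the project's actual implementation.

# Minimal sketch of the DbUtils interface the examples rely on.
# Method names and the optional `database` argument come from the calls below;
# the mysql.connector backend and connection settings are assumptions only.
import mysql.connector

class DbUtils:
    def __init__(self, database=None):
        # Connection parameters are placeholders, not the project's real config.
        self.conn = mysql.connector.connect(host='localhost', user='pipeline',
                                            password='secret',
                                            database=database or 'Pipeline')

    def executeNoResult(self, sql):
        # Run a statement that returns no rows (INSERT/UPDATE/DELETE) and commit.
        cur = self.conn.cursor()
        cur.execute(sql)
        self.conn.commit()
        cur.close()

    def executeAllResults(self, sql):
        # Run a SELECT and return all rows as a list of tuples.
        cur = self.conn.cursor()
        cur.execute(sql)
        rows = cur.fetchall()
        cur.close()
        return rows

    def close(self):
        self.conn.close()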
Example 1: __init__
class Processing:
    def __init__(self):
        self.DBClient = DbUtils()
        self.sqlBuilder = SQLBuilder()

    def getObjectFromTuple(self, tuple):
        valuesDict = dict(record_id=tuple[0], study=tuple[1], rid=tuple[2], modality=tuple[3],
                          scan_date=tuple[4].strftime("%Y-%m-%d"), scan_time=str(tuple[5]),
                          s_identifier=tuple[6], i_identifier=tuple[7], root_folder=tuple[8],
                          converted_folder=tuple[9], version=tuple[10], processed=tuple[12])
        return ProcessingObject(valuesDict)

    def insertToTable(self, objList):
        for obj in objList:
            self.DBClient.executeNoResult(
                self.sqlBuilder.getSQL_AddNewEntryToProcessingTable(obj.sqlInsert()))

    def insertFromConvertionObj(self, convertionObj):
        convertionValues = convertionObj.getValuesDict()
        convertionValues['modality'] = sc.ProcessingModalityAndPipelineTypePerStudy[convertionObj.study][convertionObj.scan_type]
        convertionValues['root_folder'] = '/'.join(convertionObj.converted_folder.split('/')[0:-2])  # Drop the last two path elements to get the root folder
        self.insertToTable([ProcessingObject(convertionValues)])

    def getToProcessListPerStudy(self, study):
        toProcessList = self.DBClient.executeAllResults(
            self.sqlBuilder.getSQL_getToBeProcessedFromProcessingTable(study))
        return [self.getObjectFromTuple(t) for t in toProcessList]
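A hypothetical way to drive the Processing class above, assuming the module defining it is importable and the underlying tables exist; the study name and printed attributes are placeholders:

# Hypothetical usage of Example 1; 'ADNI' and the attribute names are assumptions.
processing = Processing()
for item in processing.getToProcessListPerStudy('ADNI'):
    # ProcessingObject is assumed to expose the dict keys as attributes.
    print(item.record_id, item.modality, item.converted_folder)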
Example 2: __init__
class Sorting:
    def __init__(self):
        self.tableName = 'Sorting'
        self.DBClient = DbUtils()
        self.sqlBuilder = SQLBuilder()

    def getObjectFromTuple(self, tuple):
        valuesDict = dict(record_id=tuple[0], study=tuple[1], rid=tuple[2], scan_type=tuple[3],
                          scan_date=tuple[4].strftime("%Y-%m-%d"), scan_time=str(tuple[5]),
                          s_identifier=tuple[6], i_identifier=tuple[7], file_type=tuple[8],
                          download_folder=tuple[9], raw_folder=tuple[10], moved=tuple[11])
        return SortingObject(valuesDict)

    def insertToTable(self, objList):
        for obj in objList:
            self.DBClient.executeNoResult(
                self.sqlBuilder.getSQL_AddNewEntryToSortingTable(obj.sqlInsert()))

    def getUnmovedFilesPerStudy(self, study):
        unmovedList = self.DBClient.executeAllResults(
            self.sqlBuilder.getSQL_getUnmovedFilesFromSortingTable(study, tuple(sc.ProcessingModalityAndPipelineTypePerStudy[study].keys())))
        return [self.getObjectFromTuple(t) for t in unmovedList]

    def setMovedTrue(self, sortingObj):
        sortingObj.moved = 1
        self.saveObj(sortingObj)

    def saveObj(self, sortingObj):
        self.DBClient.executeNoResult(self.sqlBuilder.getSQL_saveObjSortingTable(sortingObj))
Example 3: __init__
class CoregHandler:
    def __init__(self):
        self.DBClient = DbUtils()

    def requestCoreg(self, study, rid, type, pet_folder, t1_folder, petScanType, t1ScanType, xfm_name):
        regsql = "INSERT IGNORE INTO Coregistration VALUES (Null, '{0}', '{1}', '{2}', '{3}', '{4}', '{5}', '{6}', '{7}', 0, 0, 0, Null)".format(
            study.upper(), rid, type.upper(), pet_folder, t1_folder, petScanType, t1ScanType, xfm_name)
        self.DBClient.executeNoResult(regsql)
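A hedged usage sketch for CoregHandler; only the call shape comes from the example above, and every argument value below is a made-up placeholder:

# Hypothetical call; all argument values are placeholders.
coreg = CoregHandler()
coreg.requestCoreg('adni', '0021', 'av45',
                   '/data/ADNI/AV45/0021/pet', '/data/ADNI/T1/0021/civet',
                   'AV45', 'T1', 'ADNI_0021_PET_S1_I1_T1_S2_I2.xfm')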
Example 4: __init__
class AdniCsvImport:
    def __init__(self, inputFolder, database_location):
        # Initiate the database client
        self.DbClient = DbUtils(database=database_location)
        # Import each CSV file in the input folder into the SQL database
        for inputFile in glob.glob(inputFolder + "/*.csv"):
            if inputFile in CSVconfig.AdniIgnored:
                continue
            sqlLocation = os.path.basename(inputFile).replace(".csv", "")
            with open(inputFile, "r") as csvFile:
                csvToDatabase(self.DbClient, csvFile, sqlLocation)
        # Close the connection to the database
        self.DbClient.close()
Example 5: __init__
class QCHandler:
    def __init__(self):
        self.DBClient = DbUtils()

    def requestQC(self, study, modal_table, modal_tableId, qcField, qctype, qcFolder):
        qcsql = "INSERT IGNORE INTO QC VALUES (Null, '{0}', '{1}', '{2}', '{3}', '{4}', '{5}', 0, 0, 0, 0, Null)".format(
            study.upper(), modal_table, modal_tableId, qcField, qctype, qcFolder)
        self.DBClient.executeNoResult(qcsql)

    def checkQCJobs(self, study, modality):
        sql = "SELECT * FROM {0}_{1}_Pipeline WHERE QC = 1 AND FINISHED = 1".format(study, modality)
        res = self.DBClient.executeAllResults(sql)
        if len(res) < 1:
            return 0
        else:
            for result in res:
                proc_id = result[1]
                setProcessedSQL = "UPDATE Processing SET PROCESSED = 1, QCPASSED = 1 WHERE RECORD_ID = {0}".format(proc_id)
                self.DBClient.executeNoResult(setProcessedSQL)
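A hedged sketch of how the two QCHandler methods might be combined in practice; the table name, record id, and folder below are placeholders, not values from the source:

# Hypothetical usage; table name, record id and folder are placeholders.
qc = QCHandler()
qc.requestQC('ADNI', 'ADNI_AV45_Pipeline', 42, 'QC', 'av45',
             '/data/ADNI/AV45/0021/processed')
# Later, once QC = 1 and FINISHED = 1, mark the Processing rows as done.
qc.checkQCJobs('ADNI', 'AV45')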
Example 6: __init__
class Conversion:
    def __init__(self):
        self.tableName = 'Conversion'
        self.DBClient = DbUtils()
        self.sqlBuilder = SQLBuilder()

    def getObjectFromTuple(self, tuple):
        valuesDict = dict(record_id=tuple[0], study=tuple[1], rid=tuple[2], scan_type=tuple[3],
                          scan_date=tuple[4].strftime("%Y-%m-%d"), scan_time=str(tuple[5]),
                          s_identifier=tuple[6], i_identifier=tuple[7], file_type=tuple[8], raw_folder=tuple[9],
                          converted_folder=tuple[10], version=tuple[11], converted=tuple[12])
        return ConversionObject(valuesDict)

    def insertToTable(self, objList):
        for obj in objList:
            self.DBClient.executeNoResult(
                self.sqlBuilder.getSQL_AddNewEntryToConversionTable(obj.sqlInsert()))

    def get_version(self, sortingObj, versionDict):
        pipelineType = sc.ProcessingModalityAndPipelineTypePerStudy[sortingObj.study][sortingObj.scan_type]
        if sortingObj.study == 'ADNI':
            if 'Uniform' in sortingObj.download_folder:
                return 'V2'
            else:
                return versionDict[pipelineType] if pipelineType in versionDict else 'V1'
        else:
            return versionDict[pipelineType] if pipelineType in versionDict else 'V1'

    def insertFromSortingObj(self, sortingObj, versionDict):
        sortingValues = sortingObj.getValuesDict()
        version = self.get_version(sortingObj, versionDict)
        sortingValues['converted_folder'] = '{0}/{1}/{2}/{3}/{4}_{5}_{6}/{7}/converted/final'.format(
            sc.studyDatabaseRootDict[sortingObj.study], sortingObj.study, sortingObj.scan_type, sortingObj.rid,
            sortingObj.scan_date, sortingObj.s_identifier, sortingObj.i_identifier, version)
        sortingValues['version'] = version
        sortingValues['converted'] = 0
        self.insertToTable([ConversionObject(sortingValues)])

    def gettoBeConvertedPerStudy(self, study):
        toConvertList = self.DBClient.executeAllResults(
            self.sqlBuilder.getSQL_getToBeConvertedFileFromConversionTable(study))
        return [self.getObjectFromTuple(t) for t in toConvertList]

    def setConvertedTrue(self, convertionObj):
        convertionObj.converted = 1
        self.saveObj(convertionObj)

    def setConvertedFailed(self, convertionObj):
        convertionObj.skip = 1
        self.saveObj(convertionObj)

    def saveObj(self, convertionObj):
        self.DBClient.executeNoResult(self.sqlBuilder.getSQL_saveObjConversionTable(convertionObj))

    def getConvertedListPerStudy(self, study):
        convertedList = self.DBClient.executeAllResults(
            self.sqlBuilder.getSQL_getAllConvertedFromConvertionTable(study))
        return [self.getObjectFromTuple(t) for t in convertedList]
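The Sorting class (Example 2) and the Conversion class above hand records off to each other through insertFromSortingObj. A hedged sketch of that flow follows; the versionDict contents are assumptions, and the real pipeline presumably moves the files on disk before marking them moved:

# Hypothetical glue code between Examples 2 and 6; versionDict contents are assumed.
sorting = Sorting()
conversion = Conversion()
versionDict = {'AV45': 'V2', 'FDG': 'V2'}  # pipeline type -> version, assumed
for sortingObj in sorting.getUnmovedFilesPerStudy('ADNI'):
    conversion.insertFromSortingObj(sortingObj, versionDict)
    sorting.setMovedTrue(sortingObj)  # the real pipeline would move the files first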
Example 7: __init__
class QSubJobStatusReporter:
    def __init__(self):
        self.DBClient = DbUtils()
        self.QCHandler = QCHandler()

    def setStatus(self, job, status):
        if job.jobType == 'beast':
            nestedJob = job.job
            table = '{0}_{1}_Pipeline'.format(nestedJob.study, nestedJob.modality)
            table_id = nestedJob.table_id
            if status == 'Success':
                setSql = 'UPDATE {0} SET BEAST_MASK = 1 WHERE RECORD_ID = {1}'.format(table, table_id)
            elif status == 'Fail':
                setSql = 'UPDATE {0} SET BEAST_MASK = -1, BEAST_SKIP = 1 WHERE RECORD_ID = {1}'.format(table, table_id)
            self.DBClient.executeNoResult(setSql)
            if status == 'Fail':
                PipelineLogger.log('manager', 'error', 'QSUB job Status Failed: - {0} - Processing Table ID : {1} - Modality Table ID : {2}'.format(job.jobType, nestedJob.processing_rid, nestedJob.table_id))
        if job.jobType == 'av45':
            nestedJob = job.job
            table = '{0}_{1}_Pipeline'.format(nestedJob.study, nestedJob.modality)
            table_id = nestedJob.table_id
            if status == 'Success':
                setSql = "UPDATE {0} SET FINISHED = 1, PROC_Failed = Null WHERE RECORD_ID = {1}".format(table, table_id)
                self.requestQC(nestedJob, 'av45')
            elif status == 'Fail':
                setSql = "UPDATE {0} SET PROC_Failed = 'Failed' , SKIP = 1 WHERE RECORD_ID = {1}".format(table, table_id)
            self.DBClient.executeNoResult(setSql)
            if status == 'Fail':
                PipelineLogger.log('manager', 'error', 'QSUB job Status Failed: - {0} - Processing Table ID : {1} - Modality Table ID : {2}'.format(job.jobType, nestedJob.processing_rid, nestedJob.table_id))
        if job.jobType == 'fdg':
            nestedJob = job.job
            table = '{0}_{1}_Pipeline'.format(nestedJob.study, nestedJob.modality)
            table_id = nestedJob.table_id
            if status == 'Success':
                setSql = "UPDATE {0} SET FINISHED = 1, PROC_Failed = Null WHERE RECORD_ID = {1}".format(table, table_id)
                self.requestQC(nestedJob, 'fdg')
            elif status == 'Fail':
                setSql = "UPDATE {0} SET PROC_Failed = 'Failed' , SKIP = 1 WHERE RECORD_ID = {1}".format(table, table_id)
            self.DBClient.executeNoResult(setSql)
            if status == 'Fail':
                PipelineLogger.log('manager', 'error', 'QSUB job Status Failed: - {0} - Processing Table ID : {1} - Modality Table ID : {2}'.format(job.jobType, nestedJob.processing_rid, nestedJob.table_id))

    def requestQC(self, processingItemObj, qctype):
        qcFieldDict = dict(civet='QC', beast='BEAST_QC', av45='QC', fdg='QC')
        qcFolderDict = {'civet': '{0}/civet'.format(processingItemObj.root_folder),
                        'beast': '{0}/beast'.format(processingItemObj.root_folder),
                        'av45': '{0}/processed'.format(processingItemObj.root_folder),
                        'fdg': '{0}/processed'.format(processingItemObj.root_folder)}
        self.QCHandler.requestQC(processingItemObj.study,
                                 '{0}_{1}_Pipeline'.format(processingItemObj.study, processingItemObj.modality),
                                 processingItemObj.table_id, qcFieldDict[qctype], qctype, qcFolderDict[qctype])
Example 8: DbUtils
__author__ = 'sulantha'
from Utils.DbUtils import DbUtils

DBClient = DbUtils()
RIDList = ['4225','4746','4799','4136','4142','4192','4713','4960','4387','0021','4827','4579','4580','4616','4668','4696','4809','4549','4680','5012','5019','4674','4757','4385','4721','4947','4714','4715','4736','4706','4720','4661','4728','4767','4739','4089','4379','0382','4732','0230','4586','4653','4671','4742','4369','4589','4730','4676','4689','4722','4723','4587','4631','4632','4672','4678','4756','4711','4764']
for rid in RIDList:
    sql1 = "DELETE FROM Sorting WHERE RID = {0} AND SCAN_TYPE NOT IN ('AV45', 'FDG')".format(rid)
    DBClient.executeNoResult(sql1)
    sql2 = "DELETE FROM Conversion WHERE RID = {0} AND SCAN_TYPE NOT IN ('AV45', 'FDG')".format(rid)
    DBClient.executeNoResult(sql2)
    sql3 = "SELECT RECORD_ID FROM Processing WHERE RID = {0} AND MODALITY NOT IN ('AV45', 'FDG')".format(rid)
    recs = DBClient.executeAllResults(sql3)
    for rec in recs:
        sql4 = "DELETE FROM ADNI_T1_Pipeline WHERE PROCESSING_TID = {0}".format(rec[0])
        DBClient.executeNoResult(sql4)
    sql5 = "DELETE FROM Processing WHERE RID = {0} AND MODALITY NOT IN ('AV45', 'FDG')".format(rid)
    DBClient.executeNoResult(sql5)
Example 9: DbUtils
__author__ = 'sulantha'
import datetime
from Utils.DbUtils import DbUtils

csvFile = '/data/data03/sulantha/Downloads/av45_list.csv'
MatchDBClient = DbUtils(database='Study_Data.ADNI')
DBClient = DbUtils()
with open(csvFile, 'r') as csv:
    next(csv)
    for line in csv:
        row = line.split(',')
        rid = row[0].strip()
        date = row[1].strip()
        dateT = datetime.datetime.strptime(date, '%m/%d/%Y')
        #dateT = datetime.datetime.strptime(date, '%Y-%m-%d')
        dateS = dateT.strftime('%Y-%m-%d')
        sql = "SELECT DISTINCT subject, visit, seriesid, imageid FROM PET_META_LIST WHERE subject like '%_%_{0}' and scandate = '{1}' and origproc = 'Original'".format(rid, dateS)
        result = MatchDBClient.executeAllResults(sql)
        checkDBSQL = "SELECT * FROM Conversion WHERE RID = '{0}' AND S_IDENTIFIER = '{1}' AND I_IDENTIFIER = '{2}'".format(rid, 'S{0}'.format(result[0][2]), 'I{0}'.format(result[0][3]))
        #print(checkDBSQL)
        resultN = DBClient.executeAllResults(checkDBSQL)
        if len(resultN) == 0:
            print('########################### Not in DB - {0} - {1}'.format(rid, date))
        else:
            pass
Example 10: __init__
def __init__(self):
    self.processingPPDict = {'ADNI': {'V1': {'T1': ADNI_V1_T1(), 'FMRI': ADNI_V1_FMRI(), 'AV45': ADNI_V1_AV45(), 'FDG': ADNI_V1_FDG(), 'AV1451': ADNI_V1_AV1451()},
                                      'V2': {'T1': ADNI_V1_T1(), 'FMRI': ADNI_V1_FMRI(), 'AV45': ADNI_V2_AV45(), 'FDG': ADNI_V2_FDG(), 'AV1451': ADNI_V2_AV1451()}}}
    self.DBClient = DbUtils()
    self.QCH = QCHandler()
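The nested processingPPDict maps study, pipeline version, and modality to a pipeline object (for example, ADNI_V1_T1). A hedged sketch of the dispatch such a dictionary enables follows; the process() method is assumed from Example 14, and the helper function itself is illustrative, not code from the source:

# Hypothetical dispatch helper; processingPPDict is the dictionary built above,
# item is assumed to be a ProcessingObject (Example 1) with study, version and
# modality attributes, and process() is assumed from Example 14.
def dispatch(processingPPDict, item):
    pipeline = processingPPDict[item.study][item.version][item.modality]
    return pipeline.process(item)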
Example 11: __init__
def __init__(self):
    self.DBClient = DbUtils()
    self.MatchDBClient = DbUtils(database=pc.ADNI_dataMatchDBName)
    self.PETHelper = PETHelper()
Example 12: DbUtils
__author__ = 'sulantha'
import glob, subprocess, re
from Utils.DbUtils import DbUtils
import os
from distutils import file_util, dir_util
import shutil

DBClient = DbUtils()
IID_list = ['I546612', 'I620366', 'I535767', 'I560359', 'I581738']
for iid in IID_list:
    getDataFolderSQL = "SELECT RAW_FOLDER FROM Sorting WHERE I_IDENTIFIER = '{0}'".format(iid)
    res = DBClient.executeAllResults(getDataFolderSQL)
    if len(res) == 0:
        pass
    else:
        rawFolder = res[0][0]
        dataFolder = os.path.abspath(os.path.join(rawFolder, '../'))
        shutil.rmtree(dataFolder)
        print(dataFolder)
    delsql = "DELETE FROM Sorting WHERE I_IDENTIFIER = '{0}'".format(iid)
    DBClient.executeNoResult(delsql)
    delsql = "DELETE FROM Conversion WHERE I_IDENTIFIER = '{0}'".format(iid)
    DBClient.executeNoResult(delsql)
    getProSQL = "SELECT RECORD_ID, STUDY, MODALITY FROM Processing WHERE I_IDENTIFIER = '{0}'".format(iid)
    res2 = DBClient.executeAllResults(getProSQL)
    if len(res2) == 0:
        pass
    else:
        P_ID = res2[0][0]
Example 13: DbUtils
__author__ = 'sulantha'
xfmList = '/home/sulantha/Desktop/petMatchNew2.csv'
outputpath = '/data/data03/MANUAL_XFM'
from Utils.DbUtils import DbUtils
import shutil

Dbclient = DbUtils()
with open(xfmList, 'r') as inf:
    for line in inf:
        row = line.split(',')
        if row[0].strip() == 'None' or row[2].strip() == 'None' or row[4].strip() == 'None':
            pass
        else:
            study = row[0].split('/')[-1].split('_')[0].upper()
            rid = row[0].split('/')[-1].split('_')[1][2:-2]
            petsid = row[2].split('.')[0].split('_')[-2]
            petiid = row[2].split('.')[0].split('_')[-1]
            t1sid = row[4].split('.')[0].split('_')[-2]
            t1iid = row[4].split('.')[0].split('_')[-1]
            uid = 'PET_{0}_{1}_T1_{2}_{3}'.format(petsid, petiid, t1sid, t1iid)
            path = '{0}/{1}_{2}_{3}.xfm'.format(outputpath, study, rid, uid)
            if petiid.startswith('I') and petsid.startswith('S'):
                print(study, rid, uid, sep=', ')
            else:
                print('PET - {0}'.format(row[2]))
            try:
                shutil.copyfile(row[0], path)
            except Exception as e:
                print('Error copy. {0}'.format(e))
Example 14: __init__
class Niak:
    def __init__(self):
        self.DBClient = DbUtils()

    def getScanType(self, processingItemObj):
        r = self.DBClient.executeAllResults("SELECT SCAN_TYPE FROM Conversion WHERE STUDY = '{0}' AND RID = '{1}' "
                                            "AND SCAN_DATE = '{2}' AND S_IDENTIFIER = '{3}' "
                                            "AND I_IDENTIFIER = '{4}'".format(processingItemObj.study,
                                                                              processingItemObj.subject_rid,
                                                                              processingItemObj.scan_date,
                                                                              processingItemObj.s_identifier,
                                                                              processingItemObj.i_identifier))
        return r[0][0]

    def process(self, processingItemObj):
        try:
            matlabScript, nativeFileName, niakFolder = self.readTemplateFile(processingItemObj)
            PipelineLogger.log('manager', 'info', 'NIAK starting for {0}'.format(nativeFileName))
        except:
            return 0
        # Delete the PIPE.lock file if it exists
        if os.path.isfile("%s/preprocessing/logs/PIPE.lock" % niakFolder):
            os.remove("%s/preprocessing/logs/PIPE.lock" % niakFolder)
        success = self.executeScript(processingItemObj, matlabScript, niakFolder)
        #### Afterwards, if NIAK succeeded, concatenate all runs together using combiningRuns
        if False:
            if success:
                self.combiningRuns(processingItemObj)
            else:
                PipelineLogger.log()
                #### Report error

    def readTemplateFile(self, processingItemObj):
        niakTemplateFile = os.path.dirname(__file__) + '/MatlabScripts/niakPreprocessingTemplate.m'
        niakFolder = '{0}/niak'.format(processingItemObj.root_folder)
        logDir = '{0}/logs'.format(processingItemObj.root_folder)
        # Get the corresponding subject-space MRI path
        correspondingMRI = self.findCorrespondingMRI(processingItemObj)
        if not correspondingMRI:  # If there is no corresponding MRI file
            return 0
        else:
            anat = correspondingMRI + '/civet/native/*t1.mnc'  # correspondingMRI is the root folder of the T1 MRI file
            anat = glob.glob(anat)[0]
        # Collect the anatomical scan and all fMRI runs for the subject
        patientInfo = "files_in.subject1.anat = '%s';" % (anat)
        for fmri in glob.glob(processingItemObj.converted_folder + '/*.mnc*'):
            iteration = fmri[fmri.rindex('_run') + 4:fmri.rindex('.mnc')]
            patientInfo = patientInfo + "\nfiles_in.subject1.fmri.session1{%s} = '%s'" % (iteration, fmri)
        # Read the template file
        with open(niakTemplateFile, 'r') as templateFile:
            templateFileWithInformation = templateFile.read()
        # Replace the template placeholders with the gathered information
        replacing_dict = {'%{patient_information}': patientInfo,
                          '%{opt.folder_out}': niakFolder,
                          '%{niak_location}': config.niak_location,
                          '%{nu_correct}': processingItemObj.parameters['nu_correct']}
        templateFileWithInformation = self.replaceString(templateFileWithInformation, replacing_dict)
        return templateFileWithInformation, fmri, niakFolder

    def findCorrespondingMRI(self, processingItemObj):
        # Find the matching T1
        matching_t1 = ADNI_T1_Fmri_Helper().getMatchingT1(processingItemObj)
        if not matching_t1:
            return 0
        # Find out whether the T1 has been processed
        processed = ADNI_T1_Fmri_Helper().checkProcessed(matching_t1)
        if not processed:
            PipelineLogger.log('root', 'error', 'FMRI cannot be processed because the matching T1 has not been processed.')
            return 0
        else:
            return processed

    def replaceString(self, templateText, replacing_dict):
        for query, replacedInto in replacing_dict.items():
            templateText = templateText.replace(query, replacedInto)
        return templateText

    def createMatlabFile(self, matlabScript, niakFolder):
        matlab_file_path = niakFolder + '/preprocessing_script.m'
        if not os.path.exists(niakFolder):
            os.makedirs(niakFolder)
        with open(matlab_file_path, 'w') as matlab_file:  # Overwrite any existing MATLAB script file
            matlab_file.write(matlabScript)
        return matlab_file_path

    def executeScript(self, processingItemObj, matlabScript, niakFolder):
        # ... (remaining code omitted here) ...
Example 15: DbUtils
from datetime import datetime
from Utils.DbUtils import DbUtils
import glob
import itertools

DBClient = DbUtils()
outLines = []
with open('/home/sulantha/Downloads/Av1451_V2.csv', 'r') as file:
    next(file)
    for line in file:
        row = line.split(',')
        rid = row[2]
        date = row[4].strip()
        dateT = datetime.strptime(date, '%Y-%m-%d')
        dateS = dateT.strftime('%Y-%m-%d')
        closestAV = [''] * 20
        closestFD = [''] * 20
        findAV45 = "SELECT * FROM Processing WHERE RID = {0} AND MODALITY = 'AV45' AND PROCESSED = 1 AND QCPASSED = 1 AND VERSION = 'V2'".format(rid)
        resvav45 = DBClient.executeAllResults(findAV45)
        if len(resvav45) > 0:
            sortedRecs = sorted(resvav45,
                                key=lambda x: abs(datetime.strptime(x[4].strftime('%Y-%m-%d'), '%Y-%m-%d') - dateT))
            closestDate = [k for k, g in itertools.groupby(sortedRecs, key=lambda x: abs(
                datetime.strptime(x[4].strftime('%Y-%m-%d'), '%Y-%m-%d') - dateT))][0]
            closestMatchedRecs = [list(g) for k, g in itertools.groupby(sortedRecs, key=lambda x: abs(
                datetime.strptime(x[4].strftime('%Y-%m-%d'), '%Y-%m-%d') - dateT))][0]
            closestAV = closestMatchedRecs[0]
        findFDG = "SELECT * FROM Processing WHERE RID = {0} AND MODALITY = 'FDG' AND PROCESSED = 1 AND QCPASSED = 1 AND VERSION = 'V2'".format(rid)
        resvFDG = DBClient.executeAllResults(findFDG)