This page collects typical usage examples of the Python class taca.utils.config.CONFIG. If you are unsure what the CONFIG class is for or how to use it, the curated examples below may help.
The following presents 15 code examples of the CONFIG class, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
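Before the examples, here is a minimal sketch of the pattern they all rely on: CONFIG is a module-level dict populated once from a YAML file and then read everywhere with chained .get() calls so missing sections degrade gracefully. The loader name and YAML layout below are illustrative assumptions, not TACA's exact API.

import os
import yaml

CONFIG = {}  # module-level store, analogous to taca.utils.config.CONFIG

def load_yaml_config(config_file):
    """Populate the shared CONFIG dict from a YAML file (hypothetical loader)."""
    with open(config_file) as f:
        CONFIG.update(yaml.safe_load(f) or {})
    return CONFIG

if os.path.exists('taca.yaml'):  # e.g. storage/mail/statusdb sections
    load_yaml_config('taca.yaml')
# Chained .get() calls with defaults avoid KeyError when a section is absent:
mail_recipients = CONFIG.get('mail', {}).get('recipients')
check_demux = CONFIG.get('storage', {}).get('check_demux', False)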
Example 1: cleanup_nas
def cleanup_nas(seconds):
    """Will move the finished runs in NASes to nosync directory.

    :param int seconds: Days/hours converted to seconds to consider a run to be old
    """
    couch_info = CONFIG.get('statusdb')
    mail_recipients = CONFIG.get('mail', {}).get('recipients')
    check_demux = CONFIG.get('storage', {}).get('check_demux', False)
    host_name = os.getenv('HOSTNAME', os.uname()[1]).split('.', 1)[0]
    for data_dir in CONFIG.get('storage').get('data_dirs'):
        logger.info('Moving old runs in {}'.format(data_dir))
        with filesystem.chdir(data_dir):
            for run in [r for r in os.listdir(data_dir) if re.match(filesystem.RUN_RE, r)]:
                rta_file = os.path.join(run, finished_run_indicator)
                if os.path.exists(rta_file):
                    if check_demux:
                        if misc.run_is_demuxed(run, couch_info):
                            logger.info('Moving run {} to nosync directory'.format(os.path.basename(run)))
                            shutil.move(run, 'nosync')
                        elif os.stat(rta_file).st_mtime < time.time() - seconds:
                            logger.warn('Run {} is older than given time, but it is not demultiplexed yet'
                                        .format(run))
                            sbt = "Run not demultiplexed - {}".format(run)
                            msg = ("Run '{}' in '{}' is older than the given threshold, but it seems "
                                   "it is not yet demultiplexed".format(os.path.join(data_dir, run), host_name))
                            misc.send_mail(sbt, msg, mail_recipients)
                    else:
                        if os.stat(rta_file).st_mtime < time.time() - seconds:
                            logger.info('Moving run {} to nosync directory'.format(os.path.basename(run)))
                            shutil.move(run, 'nosync')
                        else:
                            logger.info('{} file exists but is not older than given time, skipping run {}'
                                        .format(finished_run_indicator, run))
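cleanup_nas expects the age threshold already converted to seconds; a hypothetical call site for a 10-day threshold would look like this (the conversion shown is plain arithmetic, the CLI wiring is an assumption):

days = 10
cleanup_nas(days * 24 * 60 * 60)  # 10 days expressed as 864000 seconds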
Example 2: cleanup_processing
def cleanup_processing(seconds):
    """Cleanup runs in processing server.

    :param int seconds: Days/hours converted to seconds to consider a run to be old
    """
    try:
        # Remove old runs from archiving dirs
        for archive_dir in CONFIG.get('storage').get('archive_dirs').values():
            logger.info('Removing old runs in {}'.format(archive_dir))
            with filesystem.chdir(archive_dir):
                for run in [r for r in os.listdir(archive_dir) if re.match(filesystem.RUN_RE, r)]:
                    rta_file = os.path.join(run, finished_run_indicator)
                    if os.path.exists(rta_file):
                        if os.stat(rta_file).st_mtime < time.time() - seconds:
                            logger.info('Removing run {}'.format(os.path.basename(run)))
                            shutil.rmtree(run)
                        else:
                            logger.info('{} file exists but is not older than given time, skipping run {}'.format(
                                finished_run_indicator, run))
    except IOError:
        sbj = "Cannot archive old runs in processing server"
        msg = ("Could not find transfer.tsv file, so I cannot decide if I should "
               "archive any run or not.")
        cnt = CONFIG.get('contact', None)
        if not cnt:
            cnt = "{}@localhost".format(getpass.getuser())
        logger.error(msg)
        misc.send_mail(sbj, msg, cnt)
Example 3: cleanup_uppmax
def cleanup_uppmax(site, days, dry_run=False):
    """Remove project/run that have been closed more than 'days'
    from the given 'site' on uppmax.

    :param str site: site where the cleanup should be performed
    :param int days: number of days to check for closed projects
    """
    days = check_days(site, days, config)
    if not days:
        return
    root_dir = CONFIG.get("cleanup").get(site).get("root")
    deleted_log = CONFIG.get("cleanup").get("deleted_log")
    assert os.path.exists(os.path.join(root_dir, deleted_log)), "Log directory {} doesn't exist in {}".format(
        deleted_log, root_dir)
    log_file = os.path.join(root_dir, "{fl}/{fl}.log".format(fl=deleted_log))
    # Make a connection to the project database
    pcon = statusdb.ProjectSummaryConnection()
    assert pcon, "Could not connect to project database in StatusDB"
    if site != "archive":
        # Workflow for cleaning up illumina/analysis
        projects = [p for p in os.listdir(root_dir) if re.match(filesystem.PROJECT_RE, p)]
        list_to_delete = get_closed_projects(projects, pcon, days)
    else:
        # Workflow for cleaning the archive
        list_to_delete = []
        archived_in_swestore = filesystem.list_runs_in_swestore(
            path=CONFIG.get("cleanup").get("swestore").get("root"), no_ext=True)
        runs = [r for r in os.listdir(root_dir) if re.match(filesystem.RUN_RE, r)]
        with filesystem.chdir(root_dir):
            for run in runs:
                fc_date = run.split("_")[0]
                if misc.days_old(fc_date) > days:
                    if run in archived_in_swestore:
                        list_to_delete.append(run)
                    else:
                        logger.warn("Run {} is older than {} days but not in "
                                    "swestore, so SKIPPING".format(run, days))
    # Delete and log
    for item in list_to_delete:
        if dry_run:
            logger.info("Will remove {} from {}".format(item, root_dir))
            continue
        try:
            shutil.rmtree(os.path.join(root_dir, item))
            logger.info("Removed project {} from {}".format(item, root_dir))
            with open(log_file, "a") as to_log:
                to_log.write("{}\t{}\n".format(item, datetime.strftime(datetime.now(), "%Y-%m-%d %H:%M")))
        except OSError:
            logger.warn("Could not remove path {} from {}".format(item, root_dir))
            continue
Example 4: _archive_run
def _archive_run(archive_args):
    """Archive a specific run to swestore.

    The single tuple argument carries (run, days, force, compress_only):
    :param str run: Run directory
    :param int days: Days to consider a run old
    :param bool force: Force the archiving even if the run is not complete
    :param bool compress_only: Only compress the run without sending it to swestore
    """
    # Python 2 tuple-parameter syntax replaced with an explicit unpack
    run, days, force, compress_only = archive_args

    def _send_to_swestore(f, dest, remove=True):
        """Send file to swestore, checking adler32 on destination and eventually
        removing the file from disk.

        :param str f: File to send
        :param str dest: Destination directory in Swestore
        :param bool remove: If True, remove original file from source
        """
        if not filesystem.is_in_swestore(f):
            logger.info("Sending {} to swestore".format(f))
            misc.call_external_command("iput -K -P {file} {dest}".format(file=f, dest=dest),
                                       with_log_files=True)
            logger.info("Run {} sent correctly and checksum was okay.".format(f))
            if remove:
                logger.info("Removing run {}".format(f))
                os.remove(f)
        else:
            logger.warn("Run {} is already in Swestore, not sending it again nor removing it from the disk".format(f))

    # Create state file to say that the run is being archived
    open("{}.archiving".format(run.split(".")[0]), "w").close()
    if run.endswith("bz2"):
        if os.stat(run).st_mtime < time.time() - (86400 * days):
            _send_to_swestore(run, CONFIG.get("storage").get("irods").get("irodsHome"))
        else:
            logger.info("Run {} is not {} days old yet. Not archiving".format(run, str(days)))
    else:
        rta_file = os.path.join(run, "RTAComplete.txt")
        if not os.path.exists(rta_file) and not force:
            logger.warn("Run {} doesn't seem to be completed and --force option was "
                        "not enabled, not archiving the run".format(run))
        if force or (os.path.exists(rta_file) and os.stat(rta_file).st_mtime < time.time() - (86400 * days)):
            logger.info("Compressing run {}".format(run))
            # Compress with pbzip2
            misc.call_external_command("tar --use-compress-program=pbzip2 -cf {run}.tar.bz2 {run}".format(run=run))
            logger.info("Run {} successfully compressed! Removing from disk...".format(run))
            shutil.rmtree(run)
            if not compress_only:
                _send_to_swestore("{}.tar.bz2".format(run), CONFIG.get("storage").get("irods").get("irodsHome"))
        else:
            logger.info("Run {} is not completed or is not {} days old yet. Not archiving".format(run, str(days)))
    os.remove("{}.archiving".format(run.split(".")[0]))
Example 5: fetch_config_info
def fetch_config_info(self):
    """Try to fetch required info from the config file. Log and exit if any necessary info is missing."""
    try:
        self.data_dirs = CONFIG['backup']['data_dirs']
        self.archive_dirs = CONFIG['backup']['archive_dirs']
        self.keys_path = CONFIG['backup']['keys_path']
        self.gpg_receiver = CONFIG['backup']['gpg_receiver']
        self.mail_recipients = CONFIG['mail']['recipients']
        self.check_demux = CONFIG.get('backup', {}).get('check_demux', False)
        self.couch_info = CONFIG.get('statusdb')
    except KeyError as e:
        logger.error("Config file is missing the key {}, make sure it has all required information".format(str(e)))
        raise SystemExit
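For reference, fetch_config_info implies a configuration shaped roughly as below; every value is an invented placeholder for illustration, not a TACA default:

# Hypothetical CONFIG contents that would satisfy fetch_config_info().
EXAMPLE_CONFIG = {
    'backup': {
        'data_dirs': ['/data/nas1', '/data/nas2'],
        'archive_dirs': {'nas1': '/archive/nas1'},
        'keys_path': '/opt/taca/keys',
        'gpg_receiver': 'ops@example.org',
        'check_demux': True,  # optional, defaults to False
    },
    'mail': {'recipients': 'ops@example.org'},
    'statusdb': {'url': 'statusdb.example.org', 'port': 5984,
                 'username': 'taca', 'password': 'secret'},
}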
Example 6: _archive_run
def _archive_run(archive_args):
    """Archive a specific run to swestore.

    The single tuple argument carries (run, seconds, force, compress_only):
    :param str run: Run directory
    :param int seconds: Days/hours converted to seconds to check
    :param bool force: Force the archiving even if the run is not complete
    :param bool compress_only: Only compress the run without sending it to swestore
    """
    # Python 2 tuple-parameter syntax replaced with an explicit unpack
    run, seconds, force, compress_only = archive_args

    def _send_to_swestore(f, dest, remove=True):
        """Send file to swestore, checking adler32 on destination and eventually
        removing the file from disk.

        :param str f: File to send
        :param str dest: Destination directory in Swestore
        :param bool remove: If True, remove original file from source
        """
        if not filesystem.is_in_swestore(f):
            logger.info('Sending {} to swestore'.format(f))
            misc.call_external_command('iput -R swestoreArchCacheResc -P {file} {dest}'.format(file=f, dest=dest),
                                       with_log_files=True, prefix=f.replace('.tar.bz2', ''), log_dir="swestore_logs")
            logger.info('Run {} sent to swestore.'.format(f))
            if remove:
                logger.info('Removing run {}'.format(f))
                os.remove(f)
        else:
            logger.warn('Run {} is already in Swestore, not sending it again nor removing it from the disk'.format(f))

    # Create state file to say that the run is being archived
    open("{}.archiving".format(run.split('.')[0]), 'w').close()
    if run.endswith('bz2'):
        if os.stat(run).st_mtime < time.time() - seconds:
            _send_to_swestore(run, CONFIG.get('storage').get('irods').get('irodsHome'))
        else:
            logger.info("Run {} is not older than given time yet. Not archiving".format(run))
    else:
        rta_file = os.path.join(run, finished_run_indicator)
        if not os.path.exists(rta_file) and not force:
            logger.warn("Run {} doesn't seem to be completed and --force option was "
                        "not enabled, not archiving the run".format(run))
        if force or (os.path.exists(rta_file) and os.stat(rta_file).st_mtime < time.time() - seconds):
            logger.info("Compressing run {}".format(run))
            # Compress with pbzip2
            misc.call_external_command('tar --use-compress-program=pbzip2 -cf {run}.tar.bz2 {run}'.format(run=run))
            logger.info('Run {} successfully compressed! Removing from disk...'.format(run))
            shutil.rmtree(run)
            if not compress_only:
                _send_to_swestore('{}.tar.bz2'.format(run), CONFIG.get('storage').get('irods').get('irodsHome'))
        else:
            logger.info("Run {} is not completed or is not older than given time yet. Not archiving".format(run))
    os.remove("{}.archiving".format(run.split('.')[0]))
Example 7: transfer_run
def transfer_run(run_dir, analysis):
    """Interface for click to force a transfer of a run to uppmax.

    :param str run_dir: the run to transfer
    :param bool analysis: whether or not to trigger the analysis
    """
    runObj = get_runObj(run_dir)
    mail_recipients = CONFIG.get('mail', {}).get('recipients')
    if runObj is None:
        # Maybe throw an exception if possible?
        logger.error("Trying to force a transfer of run {} but the sequencer was not recognized.".format(run_dir))
    else:
        # Do not start the analysis automatically when the transfer is forced
        runObj.transfer_run(os.path.join("nosync", CONFIG['analysis']['status_dir'], 'transfer.tsv'),
                            analysis, mail_recipients)
Example 8: fail_run
def fail_run(runid, project):
    """Updates status of specified run or project-run to Failed."""
    username = CONFIG.get('statusdb', {}).get('username')
    password = CONFIG.get('statusdb', {}).get('password')
    url = CONFIG.get('statusdb', {}).get('url')
    port = CONFIG.get('statusdb', {}).get('port')
    status_db_url = "http://{username}:{password}@{url}:{port}".format(
        username=username, password=password, url=url, port=port)
    logger.info('Connecting to status db: {}:{}'.format(url, port))
    try:
        status_db = couchdb.Server(status_db_url)
    except Exception as e:
        logger.error("Can't connect to status_db: {}".format(status_db_url))
        logger.error(e)
        raise e
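fail_run and update_cronjob_db (Example 12) both assemble the same credentialed CouchDB URL by hand. A small helper, sketched here and not part of TACA, would keep that .get() chain in one place:

def statusdb_url(config):
    """Build a CouchDB URL from a CONFIG-style dict (illustrative helper)."""
    sdb = config.get('statusdb', {})
    return 'http://{username}:{password}@{url}:{port}'.format(
        username=sdb.get('username'), password=sdb.get('password'),
        url=sdb.get('url'), port=sdb.get('port'))

# Usage: status_db = couchdb.Server(statusdb_url(CONFIG))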
Example 9: archive_to_swestore
def archive_to_swestore(days, run=None, max_runs=None, force=False, compress_only=False):
    """Send runs (as archives) in NAS nosync to swestore for backup.

    :param int days: number of days for the age threshold
    :param str run: specific run to send to swestore
    :param int max_runs: number of runs to be processed simultaneously
    :param bool force: Force the archiving even if the run is not complete
    :param bool compress_only: Compress the run without sending it to swestore
    """
    # If the run is specified on the command line, check that it exists and archive it
    if run:
        # Take the base directory before stripping it from the run name
        base_dir = os.path.dirname(run)
        run = os.path.basename(run)
        if re.match(filesystem.RUN_RE, run):
            # If the parameter is not an absolute path, find the run in the archive_dirs
            if not base_dir:
                for archive_dir in CONFIG.get("storage").get("archive_dirs"):
                    if os.path.exists(os.path.join(archive_dir, run)):
                        base_dir = archive_dir
            if not os.path.exists(os.path.join(base_dir, run)):
                logger.error("Run {} not found. Please make sure to specify "
                             "the absolute path or a relative path from "
                             "the correct directory.".format(run))
            else:
                with filesystem.chdir(base_dir):
                    _archive_run((run, days, force, compress_only))
        else:
            logger.error("The name {} doesn't look like an Illumina run".format(os.path.basename(run)))
    # Otherwise find all runs in every data dir on the nosync partition
    else:
        logger.info("Archiving old runs to SWESTORE")
        for to_send_dir in CONFIG.get("storage").get("archive_dirs"):
            logger.info("Checking {} directory".format(to_send_dir))
            with filesystem.chdir(to_send_dir):
                to_be_archived = [r for r in os.listdir(to_send_dir)
                                  if re.match(filesystem.RUN_RE, r)
                                  and not os.path.exists("{}.archiving".format(r.split(".")[0]))]
                if to_be_archived:
                    pool = Pool(processes=len(to_be_archived) if not max_runs else max_runs)
                    pool.map_async(_archive_run, ((run, days, force, compress_only) for run in to_be_archived))
                    pool.close()
                    pool.join()
                else:
                    logger.info("No old runs to be archived")
Example 10: cleanup_processing
def cleanup_processing(days):
    """Cleanup runs in processing server.

    :param int days: Number of days to consider a run to be old
    """
    transfer_file = os.path.join(CONFIG.get("preprocessing", {}).get("status_dir"), "transfer.tsv")
    if not days:
        days = CONFIG.get("cleanup", {}).get("processing-server", {}).get("days", 10)
    try:
        # Move finished runs to nosync
        for data_dir in CONFIG.get("storage").get("data_dirs"):
            logger.info("Moving old runs in {}".format(data_dir))
            with filesystem.chdir(data_dir):
                for run in [r for r in os.listdir(data_dir) if re.match(filesystem.RUN_RE, r)]:
                    if filesystem.is_in_file(transfer_file, run):
                        logger.info("Moving run {} to nosync directory".format(os.path.basename(run)))
                        shutil.move(run, "nosync")
                    else:
                        logger.info("Run {} has not been transferred to the analysis "
                                    "server yet, not archiving".format(run))
        # Remove old runs from archiving dirs
        for archive_dir in CONFIG.get("storage").get("archive_dirs").values():
            logger.info("Removing old runs in {}".format(archive_dir))
            with filesystem.chdir(archive_dir):
                for run in [r for r in os.listdir(archive_dir) if re.match(filesystem.RUN_RE, r)]:
                    rta_file = os.path.join(run, "RTAComplete.txt")
                    if os.path.exists(rta_file):
                        # 1 day == 60*60*24 seconds == 86400
                        if (os.stat(rta_file).st_mtime < time.time() - (86400 * days)
                                and filesystem.is_in_swestore("{}.tar.bz2".format(run))):
                            logger.info("Removing run {}".format(os.path.basename(run)))
                            shutil.rmtree(run)
                        else:
                            logger.info("RTAComplete.txt file exists but is not older than {} day(s), "
                                        "skipping run {}".format(str(days), run))
    except IOError:
        sbj = "Cannot archive old runs in processing server"
        msg = ("Could not find transfer.tsv file, so I cannot decide if I should "
               "archive any run or not.")
        cnt = CONFIG.get("contact", None)
        if not cnt:
            cnt = "{}@localhost".format(getpass.getuser())
        logger.error(msg)
        misc.send_mail(sbj, msg, cnt)
Example 11: trigger_analysis
def trigger_analysis(run_id):
    """Trigger the analysis of the flowcell in the analysis server.

    :param str run_id: run/flowcell id
    """
    if not CONFIG.get('analysis', {}).get('analysis_server', {}):
        logger.warn('No configuration found for remote analysis server. '
                    'Not triggering analysis of {}'.format(os.path.basename(run_id)))
    else:
        url = ('http://{host}:{port}/flowcell_analysis/{dir}'
               .format(host=CONFIG['analysis']['analysis_server']['host'],
                       port=CONFIG['analysis']['analysis_server']['port'],
                       dir=os.path.basename(run_id)))
        params = {'path': CONFIG['analysis']['analysis_server']['sync']['data_archive']}
        try:
            r = requests.get(url, params=params)
            if r.status_code != requests.status_codes.codes.OK:
                logger.warn('Something went wrong when triggering the '
                            'analysis of {}. Please check the logfile '
                            'and make sure to start the analysis!'.format(os.path.basename(run_id)))
            else:
                logger.info('Analysis of flowcell {} triggered in {}'
                            .format(os.path.basename(run_id),
                                    CONFIG['analysis']['analysis_server']['host']))
                a_file = os.path.join(CONFIG['analysis']['status_dir'], 'analysis.tsv')
                with open(a_file, 'a') as analysis_file:
                    tsv_writer = csv.writer(analysis_file, delimiter='\t')
                    tsv_writer.writerow([os.path.basename(run_id), str(datetime.now())])
        except requests.exceptions.ConnectionError:
            logger.warn('Something went wrong when triggering the analysis '
                        'of {}. Please check the logfile and make sure to '
                        'start the analysis!'.format(os.path.basename(run_id)))
Example 12: update_cronjob_db
def update_cronjob_db():
    server = platform.node().split(".")[0]
    timestamp = datetime.datetime.now()
    # Parse crontab entries
    result = _parse_crontab()
    # Connect to the database
    url = "http://{username}:{password}@{url}:{port}".format(
        url=CONFIG.get("statusdb", {}).get("url"),
        username=CONFIG.get("statusdb", {}).get("username"),
        password=CONFIG.get("statusdb", {}).get("password"),
        port=CONFIG.get("statusdb", {}).get("port"))
    logging.info("Connecting to database: {}".format(CONFIG.get("statusdb", {}).get("url")))
    try:
        couch = couchdb.Server(url)
    except Exception as e:
        logging.error(str(e))
Example 13: __init__
def __init__(self, projectid=None, sampleid=None, pi_email=None, sensitive=True, hard_stage_only=False, **kwargs):
    super(GrusProjectDeliverer, self).__init__(
        projectid,
        sampleid,
        **kwargs)
    self.stagingpathhard = getattr(self, 'stagingpathhard', None)
    if self.stagingpathhard is None:
        raise AttributeError("stagingpathhard is required when delivering to GRUS")
    self.config_snic = CONFIG.get('snic', None)
    if self.config_snic is None:
        raise AttributeError("snic configuration is needed when delivering to GRUS "
                             "(snic_api_url, snic_api_user, snic_api_password)")
    self.config_statusdb = CONFIG.get('statusdb', None)
    if self.config_statusdb is None:
        raise AttributeError("statusdb configuration is needed when delivering to GRUS "
                             "(url, username, password, port)")
    # No need to raise an exception here: this has already been checked and monitoring does not need it
    self.orderportal = CONFIG.get('order_portal', None)
    self.pi_email = pi_email
    self.sensitive = sensitive
    self.hard_stage_only = hard_stage_only
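The checks in this constructor imply 'snic' and 'statusdb' sections shaped roughly as below; the key names follow the error messages, while all values are invented placeholders:

# Hypothetical configuration satisfying GrusProjectDeliverer.__init__.
EXAMPLE_DELIVERY_CONFIG = {
    'snic': {'snic_api_url': 'https://grus.example.org/api',
             'snic_api_user': 'deliverer',
             'snic_api_password': 'secret'},
    'statusdb': {'url': 'statusdb.example.org', 'username': 'taca',
                 'password': 'secret', 'port': 5984},
    'order_portal': {},  # optional; monitoring does not need it
}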
Example 14: cleanup_swestore
def cleanup_swestore(seconds, dry_run=False):
    """Remove archived runs from swestore.

    :param int seconds: Days/hours converted to seconds to check
    """
    seconds = check_default('swestore', seconds, CONFIG)
    if not seconds:
        return
    runs = filesystem.list_runs_in_swestore(path=CONFIG.get('cleanup').get('swestore').get('root'))
    for run in runs:
        date = run.split('_')[0]
        if misc.to_seconds(misc.days_old(date)) > seconds:
            if dry_run:
                logger.info('Will remove file {} from swestore'.format(run))
                continue
            misc.call_external_command('irm -f {}'.format(run))
            logger.info('Removed file {} from swestore'.format(run))
Example 15: cleanup_swestore
def cleanup_swestore(days, dry_run=False):
    """Remove archived runs from swestore.

    :param int days: Threshold days to check and remove
    """
    days = check_days('swestore', days, config)
    if not days:
        return
    runs = filesystem.list_runs_in_swestore(path=CONFIG.get('cleanup').get('swestore').get('root'))
    for run in runs:
        date = run.split('_')[0]
        if misc.days_old(date) > days:
            if dry_run:
                logger.info('Will remove file {} from swestore'.format(run))
                continue
            misc.call_external_command('irm -f {}'.format(run))
            logger.info('Removed file {} from swestore'.format(run))