本文整理汇总了Python中DAS.services.abstract_service.DASAbstractService类的典型用法代码示例。如果您正苦于以下问题:Python DASAbstractService类的具体用法?Python DASAbstractService怎么用?Python DASAbstractService使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了DASAbstractService类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: __init__
def __init__(self, config):
    """Initialize the combined data-service on top of DASAbstractService."""
    DASAbstractService.__init__(self, 'combined', config)
    self.map = self.dasmapping.servicemap(self.name)
    map_validator(self.map)
    self.dbs = 'dbs3'
    # local cache of site information; tstamp tracks last refresh time
    self.sites = dict(tstamp=0)
    # cache entries are considered fresh for one day (in seconds)
    self.thr = 60 * 60 * 24
示例2: __init__
def __init__(self, config):
    """Initialize the dbs3 data-service on top of DASAbstractService."""
    DASAbstractService.__init__(self, 'dbs3', config)
    self.reserved = ['api', 'apiversion']
    self.map = self.dasmapping.servicemap(self.name)
    map_validator(self.map)
    # pick up the DBS instance configuration
    dbs_cfg = config['dbs']
    self.prim_instance = dbs_cfg['dbs_global_instance']
    self.instances = dbs_cfg['dbs_instances']
示例3: parser
def parser(self, query, dformat, source, api):
    """
    ReqMgr data-service parser.

    For the 'inputdataset' API, unwrap the nested WMCore request record
    and normalize its InputDatasetTypes mapping into a list of
    {'dataset': ..., 'type': ...} dicts.  For 'configIDs', flatten the
    dataset mapping into (request_name, config_files) records.  Rows
    that do not follow the expected structure are passed through
    (inputdataset) or skipped (configIDs), matching the original
    best-effort behavior.
    """
    if api == 'inputdataset':
        gen = DASAbstractService.parser(self, query, dformat, source, api)
        for row in gen:
            try:
                data = row['dataset']
                data = \
                    data['WMCore.RequestManager.DataStructs.Request.Request']
                # use `in` and items() instead of the Python2-only
                # has_key()/iteritems() so the code also runs on Python 3
                if 'InputDatasetTypes' in data:
                    arr = []
                    for key, val in data['InputDatasetTypes'].items():
                        arr.append({'dataset': key, 'type': val})
                    data['InputDatasetTypes'] = arr
                yield data
            except Exception:
                # unexpected row shape: yield it unmodified (best effort)
                yield row
    elif api == 'configIDs':
        gen = DASAbstractService.parser(self, query, dformat, source, api)
        for row in gen:
            try:
                for key, val in row['dataset'].items():
                    yield dict(request_name=key, config_files=val)
            except Exception:
                # row lacks a usable 'dataset' mapping: skip it
                pass
示例4: __init__
def __init__(self, config):
    """Initialize the runsum data-service on top of DASAbstractService."""
    DASAbstractService.__init__(self, 'runsum', config)
    self.results = []
    self._keys = None
    # default query parameters sent to the run-summary service
    self.params = {'DB': 'cms_omds_lb', 'FORMAT': 'XML'}
    self.map = self.dasmapping.servicemap(self.name)
    map_validator(self.map)
示例5: __init__
def __init__(self, config):
    """Initialize the dbs data-service on top of DASAbstractService."""
    DASAbstractService.__init__(self, 'dbs', config)
    self.reserved = ['api', 'apiversion']
    self.map = self.dasmapping.servicemap(self.name)
    map_validator(self.map)
    # DBS instances are resolved through the DAS mapping service
    self.prim_instance = self.dasmapping.dbs_global_instance(self.name)
    self.instances = self.dasmapping.dbs_instances(self.name)
    # optional cache-expiration tuning, both default to 0 (disabled)
    dbs_cfg = config['dbs']
    self.extended_expire = dbs_cfg.get('extended_expire', 0)
    self.extended_threshold = dbs_cfg.get('extended_threshold', 0)
示例6: __init__
def __init__(self, config):
    """Initialize the xwho data-service on top of DASAbstractService."""
    DASAbstractService.__init__(self, "xwho", config)
    self.map = self.dasmapping.servicemap(self.name)
    map_validator(self.map)
    # pre-compiled patterns used to scrape the xwho HTML pages
    self.re_find_name = re.compile(r"<h1>(.*?)</h1>")
    self.re_find_email = re.compile(r"<a href=mailto:(.*?)>")
    self.re_find_phone = re.compile(r"<b>Tel:</b>([0-9 ]+)")
    self.re_summary_ids = re.compile(r'<a href="/xwho/people/([0-9]{6})">')
示例7: __init__
def __init__(self, config):
    """Initialize the dbs3 data-service on top of DASAbstractService."""
    DASAbstractService.__init__(self, "dbs3", config)
    self.reserved = ["api", "apiversion"]
    self.map = self.dasmapping.servicemap(self.name)
    map_validator(self.map)
    # DBS instances are resolved through the DAS mapping service
    mapping = self.dasmapping
    self.prim_instance = mapping.dbs_global_instance(self.name)
    self.instances = mapping.dbs_instances(self.name)
    # optional cache-expiration tuning, both default to 0 (disabled)
    dbs_cfg = config["dbs"]
    self.extended_expire = dbs_cfg.get("extended_expire", 0)
    self.extended_threshold = dbs_cfg.get("extended_threshold", 0)
    # which DBS flavor DAS should treat as the main one
    self.dbs_choice = config["das"].get("main_dbs", "dbs3")
示例8: __init__
def __init__(self, config):
    """
    Initialize the cmsswconfigs data-service on top of DASAbstractService
    and open access to the 'configdb' MongoDB database, creating one
    MongoQuery manager per release collection.
    """
    DASAbstractService.__init__(self, 'cmsswconfigs', config)
    self.headers = {'Accept': 'text/json;application/json'}
    self.map = self.dasmapping.servicemap(self.name)
    map_validator(self.map)
    # specify access to DB
    dburi = config.get('dburi')
    self.conn = db_connection(dburi)
    database = self.conn['configdb']
    self.managers = {}
    for release in database.collection_names():
        # skip internal index collections; `in` is clearer than find() == -1
        if 'index' not in release:
            self.managers[release] = MongoQuery(release)
    # materialize as a list so the attribute behaves the same on
    # Python 2 (list) and Python 3 (where keys() is a view)
    self.releases = list(self.managers.keys())
示例9: parser
def parser(self, query, dformat, source, api):
    """
    CondDB data-service parser.

    Delegates parsing to the base class and passes every record through
    unchanged.
    """
    for record in DASAbstractService.parser(self, query, dformat, source, api):
        yield record
示例10: parser
def parser(self, query, dformat, source, api):
    """
    DBS3 data-service parser.

    Dispatches on the DBS3 ``api`` name: collects the distinct set of
    origin sites for 'site4dataset', unwraps dataset summaries for
    'filesummaries', strips the redundant block back-reference for
    'blockparents', flattens file parent/child name lists, and passes
    every other API's records through unchanged.
    """
    if api == 'site4dataset':
        sites = set()
        for rec in json_parser(source, self.logger):
            if isinstance(rec, list):
                for row in rec:
                    # set.add is idempotent; no membership test needed
                    sites.add(row['origin_site_name'])
            else:
                orig_site = rec.get('origin_site_name', None)
                if orig_site:
                    sites.add(orig_site)
        for site in sites:
            yield {'site': {'name': site}}
    elif api == 'filesummaries':
        gen = DASAbstractService.parser(self, query, dformat, source, api)
        for row in gen:
            yield row['dataset']
    elif api == 'blockparents':
        gen = DASAbstractService.parser(self, query, dformat, source, api)
        for row in gen:
            # drop the redundant back-reference to the queried block;
            # narrow the except clause so unrelated errors still surface
            try:
                del row['parent']['this_block_name']
            except (KeyError, TypeError):
                pass
            yield row
    elif api == 'fileparents':
        gen = DASAbstractService.parser(self, query, dformat, source, api)
        for row in gen:
            parent = row['parent']
            for val in parent['parent_logical_file_name']:
                yield dict(name=val)
    elif api == 'filechildren':
        gen = DASAbstractService.parser(self, query, dformat, source, api)
        for row in gen:
            parent = row['child']
            for val in parent['child_logical_file_name']:
                yield dict(name=val)
    else:
        gen = DASAbstractService.parser(self, query, dformat, source, api)
        for row in gen:
            yield row
示例11: parser
def parser(self, query, dformat, source, api):
    """
    CondDB data-service parser.

    For 'get_lumi_info' each record's nested lumi list is flattened into
    individual lumi records; all other APIs pass through unchanged.
    """
    gen = DASAbstractService.parser(self, query, dformat, source, api)
    # `api` is fixed for the whole call, so branch once instead of per row
    if api == 'get_lumi_info':
        for row in gen:
            for lumi in row['lumi']['Lumi']:
                yield lumi
    else:
        for row in gen:
            yield row
示例12: __init__
def __init__(self, config):
    """Initialize the dashboard data-service on top of DASAbstractService."""
    DASAbstractService.__init__(self, 'dashboard', config)
    # the dashboard service responds with XML payloads
    self.headers = dict(Accept='text/xml')
    self.map = self.dasmapping.servicemap(self.name)
    map_validator(self.map)
示例13: parser_helper
def parser_helper(self, query, dformat, source, api):
    """
    DBS3 data-service parser helper, it is used by parser method.

    Dispatches on the DBS3 ``api`` name and reshapes raw service
    records into DAS records.  NOTE(review): this snippet is truncated
    by the example aggregator, so the branches below may not be
    exhaustive.
    """
    # site4dataset payloads are raw JSON; everything else goes through
    # the generic base-class parser
    if api == "site4dataset":
        gen = json_parser(source, self.logger)
    else:
        gen = DASAbstractService.parser(self, query, dformat, source, api)
    if api == "site4dataset":
        # collect the distinct set of origin sites across all records
        sites = set()
        for rec in gen:
            if isinstance(rec, list):
                for row in rec:
                    orig_site = row["origin_site_name"]
                    if orig_site not in sites:
                        sites.add(orig_site)
            else:
                orig_site = rec.get("origin_site_name", None)
                if orig_site and orig_site not in sites:
                    sites.add(orig_site)
        for site in sites:
            yield {"site": {"name": site}}
    elif api == "datasets" or api == "dataset_info":
        # rename the 'dataset' field to 'name' and wrap the row
        for row in gen:
            row["name"] = row["dataset"]
            del row["dataset"]
            yield {"dataset": row}
    elif api == "filesummaries":
        # inject the dataset name from the query back into each summary
        name = query.mongo_query["spec"]["dataset.name"]
        for row in gen:
            row["dataset"]["name"] = name
            yield row
    elif api == "summary4dataset_run" or api == "summary4block_run":
        spec = query.mongo_query.get("spec", {})
        dataset = spec.get("dataset.name", "")
        block = spec.get("block.name", "")
        run = spec.get("run.run_number", 0)
        if isinstance(run, dict): # we got a run range
            if "$in" in run:
                run = run["$in"]
            elif "$lte" in run:
                # NOTE(review): range() excludes its upper bound, so the
                # run equal to $lte is dropped — possible off-by-one; confirm
                run = range(run["$gte"], run["$lte"])
        # annotate each summary row with the query constraints
        for row in gen:
            if run:
                row.update({"run": run})
            if dataset:
                row.update({"dataset": dataset})
            if block:
                row.update({"block": block})
            yield row
    elif api == "blockorigin":
        for row in gen:
            yield row
    elif api == "blockparents":
        for row in gen:
            # drop the redundant back-reference to the queried block
            try:
                del row["parent"]["this_block_name"]
            except:
                pass
            yield row
    elif api == "fileparents":
        # flatten the list of parent logical file names
        for row in gen:
            parent = row["parent"]
            for val in parent["parent_logical_file_name"]:
                yield dict(name=val)
    elif api == "runs_via_dataset" or api == "runs":
        # run_num may be a scalar or a list of run numbers
        for row in gen:
            values = row["run"]["run_num"]
            if isinstance(values, list):
                for val in values:
                    yield dict(run_number=val)
            else:
                yield dict(run_number=values)
    elif api == "filechildren":
        # flatten the list of child logical file names
        for row in gen:
            parent = row["child"]
            for val in parent["child_logical_file_name"]:
                yield dict(name=val)
    elif api == "files" or api == "files_via_dataset" or api == "files_via_block":
        # filter rows by file validity; default keeps only VALID files
        status = "VALID"
        for row in gen:
            if "spec" in query.mongo_query:
                if "status.name" in query.mongo_query["spec"]:
                    status = query.mongo_query["spec"]["status.name"]
            file_status = row["file"]["is_file_valid"]
            if status == "INVALID": # filter out valid files
                if int(file_status) == 1: # valid status
                    row = None
            else: # filter out invalid files
                if int(file_status) == 0: # invalid status
                    row = None
            if row:
                yield row
    elif api == "filelumis" or api == "filelumis4block":
        # compress explicit lumi-section lists into ranges
        for row in gen:
            if "lumi" in row:
                if "lumi_section_num" in row["lumi"]:
                    val = row["lumi"]["lumi_section_num"]
                    row["lumi"]["lumi_section_num"] = convert2ranges(val)
            yield row
    #......... part of the code omitted here .........
示例14: __init__
def __init__(self, config):
    """Initialize the dq data-service on top of DASAbstractService."""
    DASAbstractService.__init__(self, "dq", config)
    self.map = self.dasmapping.servicemap(self.name)
    map_validator(self.map)
    self._keys = None
示例15: parser_helper
def parser_helper(self, query, dformat, source, api):
    """
    DBS3 data-service parser helper, it is used by parser method.

    Dispatches on the DBS3 ``api`` name and reshapes raw service
    records into DAS records.  NOTE(review): this snippet is truncated
    by the example aggregator — the body of the final 'files*' branch
    is omitted, so the function is incomplete as shown.
    """
    # site queries return raw JSON; everything else goes through the
    # generic base-class parser
    if api in ['site4dataset', 'site4block']:
        gen = json_parser(source, self.logger)
    else:
        gen = DASAbstractService.parser(self, query, dformat, source, api)
    if api in ['site4dataset', 'site4block']:
        # collect the distinct set of origin sites across all records
        sites = set()
        for rec in gen:
            if isinstance(rec, list):
                for row in rec:
                    orig_site = row['origin_site_name']
                    if orig_site not in sites:
                        sites.add(orig_site)
            else:
                orig_site = rec.get('origin_site_name', None)
                if orig_site and orig_site not in sites:
                    sites.add(orig_site)
        for site in sites:
            yield {'site': {'name': site}}
    elif api == 'datasets' or api == 'dataset_info' or api == 'datasetlist':
        # rename the 'dataset' field to 'name' and wrap the row
        for row in gen:
            row['name'] = row['dataset']
            del row['dataset']
            yield {'dataset':row}
    elif api == 'filesummaries':
        # inject the dataset name from the query back into each summary
        name = query.mongo_query['spec']['dataset.name']
        for row in gen:
            row['dataset']['name'] = name
            yield row
    elif api == 'summary4dataset_run' or api == 'summary4block_run':
        spec = query.mongo_query.get('spec', {})
        dataset = spec.get('dataset.name', '')
        block = spec.get('block.name', '')
        run = spec.get('run.run_number', 0)
        if isinstance(run, dict): # we got a run range
            if '$in' in run:
                run = run['$in']
            elif '$lte' in run:
                # NOTE(review): range() excludes its upper bound, so the
                # run equal to $lte is dropped — possible off-by-one; confirm
                run = range(run['$gte'], run['$lte'])
        # annotate each summary row with the query constraints
        for row in gen:
            if run:
                row.update({"run": run})
            if dataset:
                row.update({"dataset": dataset})
            if block:
                row.update({"block": block})
            yield row
    elif api == 'releaseversions':
        # flatten the list of release versions into individual records
        for row in gen:
            values = row['release']['release_version']
            for val in values:
                yield dict(release=dict(name=val))
    elif api == 'datasetaccesstypes':
        # flatten the list of dataset access types
        for row in gen:
            values = row['status']['dataset_access_type']
            for val in values:
                yield dict(status=dict(name=val))
    elif api == 'blockorigin':
        for row in gen:
            yield row
    elif api == 'blockparents':
        for row in gen:
            # drop the redundant back-reference to the queried block
            try:
                del row['parent']['this_block_name']
            except:
                pass
            yield row
    elif api == 'fileparents':
        # flatten the list of parent logical file names
        for row in gen:
            parent = row['parent']
            for val in parent['parent_logical_file_name']:
                yield dict(name=val)
    elif api == 'runs_via_dataset' or api == 'runs':
        # run_num may be a scalar or a list; 'N/A' when absent
        for row in gen:
            values = row.get('run', {}).get('run_num', 'N/A')
            if isinstance(values, list):
                for val in values:
                    yield dict(run_number=val)
            else:
                yield dict(run_number=values)
    elif api == 'filechildren':
        # flatten the list of child logical file names
        for row in gen:
            parent = row['child']
            for val in parent['child_logical_file_name']:
                yield dict(name=val)
    elif api == 'files' or api == 'files_via_dataset' or \
            api == 'files_via_block':
        # filter rows by file validity; files lacking the flag are
        # treated as invalid (status 0)
        status = 'VALID'
        for row in gen:
            if 'spec' in query.mongo_query:
                if 'status.name' in query.mongo_query['spec']:
                    status = query.mongo_query['spec']['status.name']
            try:
                file_status = row['file']['is_file_valid']
            except KeyError:
                file_status = 0 # file status is unknown
            if status == '*': # any file
    #......... part of the code omitted here .........