This article collects typical usage examples of the Python class org.bccvl.site.interfaces.IBCCVLMetadata. If you are wondering what exactly the IBCCVLMetadata class does, how to use it, or where to find examples of it in use, the hand-picked class examples below may help.
The following shows 15 code examples of the IBCCVLMetadata class, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
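All of the examples share one basic pattern: adapt a Plone content object to IBCCVLMetadata and read the resulting mapping with dict-style access (keys such as 'genre', 'layers' and 'species' appear throughout the examples below). The following is a minimal sketch of that pattern; the helper name describe_dataset is illustrative and not part of the examples:

from org.bccvl.site.interfaces import IBCCVLMetadata


def describe_dataset(obj):
    # Adapt a dataset content object to its metadata mapping and read a few
    # commonly used keys; missing keys are handled with dict-style defaults.
    md = IBCCVLMetadata(obj)
    return {
        'genre': md.get('genre'),
        'layers': sorted(md.get('layers', {})),
        'species': md.get('species', {}).get('scientificName'),
    }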
Example 1: dataset_environmental_layer
def dataset_environmental_layer(obj, **kw):
    md = IBCCVLMetadata(obj)
    # if we have 'layers_used' index it
    if 'layers_used' in md:
        return md['layers_used']
    # otherwise index list of layers provided by dataset
    return md.get('layers', None)
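The (obj, **kw) signature above is the usual shape of a portal_catalog indexer. A hedged sketch of how such a function might be registered with plone.indexer follows; binding it to the generic Interface and the adapter name environmental_layer are assumptions for illustration, not details taken from the snippet:

from plone.indexer.decorator import indexer
from zope.interface import Interface


# Sketch only: a real project would bind the indexer to its own dataset
# interface and register it as a named adapter, e.g. via ZCML:
#   <adapter factory=".indexers.environmental_layer" name="environmental_layer" />
@indexer(Interface)
def environmental_layer(obj, **kw):
    return dataset_environmental_layer(obj, **kw)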
Example 2: rat
def rat(self):
    uuid = self.request.form.get('uuid')
    layer = self.request.form.get('layer')
    brain = None
    try:
        brain = uuidToCatalogBrain(uuid)
    except Exception as e:
        LOG.error('Caught exception %s', e)
    if not brain:
        self.record_error('Not Found', 404,
                          'dataset not found',
                          {'parameter': 'uuid'})
        raise NotFound(self, 'metadata', self.request)
    md = IBCCVLMetadata(brain.getObject())
    if not layer or layer not in md.get('layers', {}):
        self.record_error('Bad Request', 400,
                          'Missing parameter layer',
                          {'parameter': 'layer'})
        raise BadRequest('Missing parameter layer')
    try:
        rat = md.get('layers', {}).get(layer, {}).get('rat')
        rat = json.loads(unicode(rat))
        return rat
    except Exception as e:
        LOG.warning(
            "Couldn't decode Raster Attribute Table from metadata. %s: %s",
            self.context, repr(e))
        raise NotFound(self, 'rat', self.request)
Example 3: items
def items(self):
    # return dict with keys for experiment
    # and subkey 'models' for models within experiment
    if self.value:
        for experiment_uuid, model_uuids in self.value.items():
            item = {}
            expbrain = uuidToCatalogBrain(experiment_uuid)
            item['title'] = expbrain.Title
            item['uuid'] = expbrain.UID
            # TODO: what else would I need from an experiment?
            exp = expbrain.getObject()
            expmd = IBCCVLMetadata(exp)
            item['resolution'] = expmd.get('resolution')
            item['brain'] = expbrain
            # now search all models within and add infos
            pc = getToolByName(self.context, 'portal_catalog')
            brains = pc.searchResults(path=expbrain.getPath(),
                                      BCCDataGenre=self.genre)
            # TODO: maybe as generator?
            item['datasets'] = [{'uuid': brain.UID,
                                 'title': brain.Title,
                                 'obj': brain.getObject(),
                                 'md': IBCCVLMetadata(brain.getObject()),
                                 'selected': brain.UID in self.value[experiment_uuid]}
                                for brain in brains]
            yield item
Example 4: biodiverse_listing_details
def biodiverse_listing_details(expbrain):
    details = {}
    exp = expbrain.getObject()
    species = set()
    years = set()
    emscs = set()
    gcms = set()
    for dsuuid in chain.from_iterable(map(lambda x: x.keys(), exp.projection.itervalues())):
        dsobj = uuidToObject(dsuuid)
        # TODO: should inform user about missing dataset
        if dsobj:
            md = IBCCVLMetadata(dsobj)
            species.add(md.get("species", {}).get("scientificName", ""))
            period = md.get("temporal")
            if period:
                years.add(Period(period).start)
            gcm = md.get("gcm")
            if gcm:
                gcms.add(gcm)
            emsc = md.get("emsc")
            if emsc:
                emscs.add(emsc)
    details.update(
        {
            "type": "BIODIVERSE",
            "functions": "endemism, redundancy",
            "species_occurrence": ", ".join(sorted(species)),
            "species_absence": "{}, {}".format(", ".join(sorted(emscs)), ", ".join(sorted(gcms))),
            "years": ", ".join(sorted(years)),
        }
    )
    return details
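For orientation, here is a hedged illustration of the dict shape this helper returns; every value below is invented for the example, not taken from real metadata:

# Illustrative only: keys follow the code above, values are made up.
example_details = {
    "type": "BIODIVERSE",
    "functions": "endemism, redundancy",
    "species_occurrence": "Acacia dealbata",
    "species_absence": "RCP45, ACCESS1-0",
    "years": "2035, 2055",
}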
Example 5: details
def details(self, context=None):
    # fetch details about dataset, if attributes are unpopulated
    # get data from associated collection
    if context is None:
        context = self.context
    coll = context
    while not (ISiteRoot.providedBy(coll) or ICollection.providedBy(coll)):
        coll = coll.__parent__
    # we have either hit siteroot or found a collection
    ret = {
        'title': context.title,
        'description': context.description or coll.description,
        'attribution': context.attribution or getattr(coll, 'attribution'),
        'rights': context.rights or coll.rights,
        'external_description': context.external_description or getattr(coll, 'external_description'),
    }
    md = IBCCVLMetadata(context)
    if 'layers' in md:
        layers = []
        for layer in sorted(md.get('layers', ())):
            try:
                layers.append(self.layer_vocab.getTerm(layer))
            except:
                layers.append(SimpleTerm(layer, layer, layer))
        if layers:
            ret['layers'] = layers
    return ret
Example 6: getdatasetparams
def getdatasetparams(uuid):
    # return dict with:
    #   filename
    #   downloadurl
    #   dm_accessurl -> maybe add url rewrite to datamover?
    #   # occurrence specific:
    #   species
    #   # raster specific:
    #   layers ... need to split this up
    dsobj = uuidToObject(uuid)
    if dsobj is None:
        return None
    dsinfo = getDatasetInfo(dsobj, uuid)
    # if we have species info add it
    dsmdr = IBCCVLMetadata(dsobj)
    species = dsmdr.get('species', {}).get('scientificName')
    if species:
        dsinfo['species'] = species
    # if we can get layermetadata, let's add it
    biomod = getdsmetadata(dsobj)
    layers = biomod.get('layers', [])
    if len(layers) > 0:
        for lk, lv in biomod['layers'].items():
            if lv is not None:
                dsinfo.setdefault('layers', {})[lk] = {
                    'filename': lv.get('filename', biomod['filename']),
                    'datatype': lv.get('datatype', None)}
    # return infoset
    return dsinfo
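A hedged illustration of what getdatasetparams might return: the base keys (filename, downloadurl, ...) come from getDatasetInfo, which is not shown here, and all concrete values are invented:

# Illustrative only: the dict shape assembled above, with made-up values.
example_dsinfo = {
    'filename': 'future_climate.zip',
    'downloadurl': 'https://example.com/datasets/future_climate.zip',
    'species': 'Acacia dealbata',
    'layers': {
        'B01': {'filename': 'future_climate/B01.tif', 'datatype': 'continuous'},
    },
}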
Example 7: year
def year(obj, **kw):
    # FIXME: this indexer is meant for future projection datasets only ...
    #        make sure we don't index any other datasets (i.e. environmental
    #        and current datasets), which may have a date attached to them,
    #        but it is meaningless for future projections
    md = IBCCVLMetadata(obj)
    year = md.get('year', None)
    if year:
        year = str(year)
    return year
Example 8: addSpeciesInfo
def addSpeciesInfo(bccvlmd, result):
    if ISDMExperiment.providedBy(result.__parent__):
        spds = uuidToObject(result.job_params['species_occurrence_dataset'])
    if IProjectionExperiment.providedBy(result.__parent__):
        spds = uuidToObject(result.job_params['species_distribution_models'])
    speciesmd = IBCCVLMetadata(spds).get('species', None)
    if speciesmd:
        bccvlmd['species'] = speciesmd.copy()
Example 9: scientific_category
def scientific_category(obj, **kw):
    md = IBCCVLMetadata(obj)
    vocab = getUtility(IVocabularyFactory, 'scientific_category_source')(obj)
    path = set()
    for cat in md.get('categories', ()):
        path.update(vocab.getTermPath(cat))
    if path:
        return tuple(path)
    return None
Example 10: items
def items(self):
    # return dict with keys for experiment
    # and subkey 'models' for models within experiment
    if self.value:
        for experiment_uuid, model_uuids in self.value.items():
            item = {}
            expbrain = uuidToCatalogBrain(experiment_uuid)
            # TODO: we have an experiment_uuid, but can't access the
            #       experiment (deleted?, access denied?)
            #       shall we at least try to get some details?
            if expbrain is None:
                continue
            item['title'] = expbrain.Title
            item['uuid'] = expbrain.UID
            item['brain'] = expbrain
            # TODO: what else would I need from an experiment?
            exp = expbrain.getObject()
            expmd = IBCCVLMetadata(exp)
            item['resolution'] = expmd.get('resolution')
            # now search all datasets within and add infos
            pc = getToolByName(self.context, 'portal_catalog')
            results = pc.searchResults(path=expbrain.getPath(),
                                       portal_type='Folder',
                                       job_state='COMPLETED')
            brains = pc.searchResults(path=[r.getPath() for r in results],
                                      BCCDataGenre=self.genre)
            # TODO: maybe as generator?
            item['subitems'] = []
            for brain in brains:
                # FIXME: I need a different list of thresholds for display;
                #        esp. don't look up threshold, but take values
                #        (threshold id and value) from field as is
                thresholds = dataset.getThresholds(brain.UID)[brain.UID]
                threshold = self.value[experiment_uuid].get(brain.UID)
                # is threshold in list?
                if threshold and threshold['label'] not in thresholds:
                    # maybe a custom entered number?
                    # ... I guess we don't really care as long as we reproduce
                    # what the user entered. (validate?)
                    thresholds[threshold['label']] = threshold['label']
                dsobj = brain.getObject()
                dsmd = IBCCVLMetadata(dsobj)
                item['subitems'].append({
                    'uuid': brain.UID,
                    'title': brain.Title,
                    'selected': brain.UID in self.value[experiment_uuid],
                    'threshold': threshold,
                    'thresholds': thresholds,
                    'brain': brain,
                    'md': dsmd,
                    'obj': dsobj,
                    # TODO: is this correct? only one layer ever?
                    'layermd': dsmd['layers'].values()[0]
                })
            yield item
Example 11: getGenreSchemata
def getGenreSchemata(self):
    schemata = []
    md = IBCCVLMetadata(self.context)
    genre = md.get('genre')
    if genre in self.genre_interface_map:
        schemata.append(self.genre_interface_map[genre])
    if IBlobDataset.providedBy(self.context):
        schemata.append(IBlobDataset)
    if IRemoteDataset.providedBy(self.context):
        schemata.append(IRemoteDataset)
    return schemata
Example 12: subitems
def subitems(self, dsbrain):
    # return a generator of selectable items within dataset
    md = IBCCVLMetadata(dsbrain.getObject())
    layer_vocab = self.dstools.layer_vocab
    selectedlayers = self.value.get(dsbrain.UID) or ()
    for layer in sorted(md.get('layers', ())):
        subitem = {
            'id': layer,
            'title': layer_vocab.getTerm(layer).title,
            'selected': layer in selectedlayers,
        }
        yield subitem
Example 13: getRAT
def getRAT(self, datasetid, layer=None):
    query = {'UID': datasetid}
    dsbrain = dataset.query(self.context, brains=True, **query)
    if dsbrain:
        # get first brain from list
        dsbrain = next(dsbrain, None)
    if not dsbrain:
        raise NotFound(self.context, datasetid, self.request)
    md = IBCCVLMetadata(dsbrain.getObject())
    rat = md.get('layers', {}).get(layer, {}).get('rat')
    # if we have a rat, let's try and parse it
    if rat:
        try:
            rat = json.loads(unicode(rat))
        except Exception as e:
            LOG.warning("Couldn't decode Raster Attribute Table from metadata. %s: %s",
                        self.context, repr(e))
            rat = None
    return rat
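The RAT stored in the metadata is a JSON string, so the decoding step boils down to json.loads. A minimal, self-contained sketch of that step; the column names are illustrative, not a fixed schema:

import json

# The 'rat' metadata value is a JSON string; decode it into a Python structure.
rat_json = u'{"VALUE": [1, 2], "LABEL": ["Forest", "Grassland"]}'
rat = json.loads(rat_json)
print(rat["LABEL"])  # [u'Forest', u'Grassland'] under Python 2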
Example 14: get_project_params
def get_project_params(result):
    params = deepcopy(result.job_params)
    # get metadata for species_distribution_models
    uuid = params["species_distribution_models"]
    params["species_distribution_models"] = getdatasetparams(uuid)
    # do biomod name mangling of species name
    params["species_distribution_models"]["species"] = re.sub(
        u"[ _'\"/\(\)\{\}\[\]]", u".", params["species_distribution_models"].get("species", u"Unknown")
    )
    # we need the layers from the sdm to fetch the correct files for climate_models
    # TODO: getdatasetparams should fetch 'layers'
    sdmobj = uuidToObject(uuid)
    sdmmd = IBCCVLMetadata(sdmobj)
    params["species_distribution_models"]["layers"] = sdmmd.get("layers_used", None)
    # do future climate layers
    climatelist = []
    for uuid, layers in params["future_climate_datasets"].items():
        dsinfo = getdatasetparams(uuid)
        for layer in layers:
            dsdata = {
                "uuid": dsinfo["uuid"],
                "filename": dsinfo["filename"],
                "downloadurl": dsinfo["downloadurl"],
                "internalurl": dsinfo["internalurl"],
                "layer": layer,
                "zippath": dsinfo["layers"][layer]["filename"],
                # TODO: add year, gcm, emsc here?
                "type": dsinfo["layers"][layer]["datatype"],
            }
            # if this is a zip file we'll have to set zippath as well
            # FIXME: poor check whether this is a zip file
            if dsinfo["filename"].endswith(".zip"):
                dsdata["zippath"] = dsinfo["layers"][layer]["filename"]
            climatelist.append(dsdata)
    # replace climate_models parameter
    params["future_climate_datasets"] = climatelist
    params["selected_models"] = "all"
    # projection.name from dsinfo
    # FIXME: workaround to get the future projection name back; this works only
    #        for the file naming scheme of the currently available data
    params["projection_name"], _ = os.path.splitext(dsinfo["filename"])
    # add hints for worker
    workerhints = {"files": ("species_distribution_models", "future_climate_datasets")}
    return {"env": {}, "params": params, "worker": workerhints}
Example 15: DatasetSearchableText
def DatasetSearchableText(obj, **kw):
    md = IBCCVLMetadata(obj)
    entries = [safe_unicode(obj.id), safe_unicode(obj.title) or u"", safe_unicode(obj.description) or u""]
    if "layers" in md:
        layer_vocab = getUtility(IVocabularyFactory, "layer_source")(obj)
        for key in md["layers"]:
            if key not in layer_vocab:
                continue
            entries.append(safe_unicode(layer_vocab.getTerm(key).title) or u"")
    if "species" in md:
        entries.extend(
            (
                safe_unicode(md.get("species", {}).get("scientificName")) or u"",
                safe_unicode(md.get("species", {}).get("vernacularName")) or u"",
            )
        )
    if md.get("genre") == "DataGenreFC":
        # year, gcm, emsc
        emsc_vocab = getUtility(IVocabularyFactory, "emsc_source")(obj)
        gcm_vocab = getUtility(IVocabularyFactory, "gcm_source")(obj)
        year = unicode(md.get("year", u""))
        month = unicode(md.get("month", u""))
        if md["emsc"] in emsc_vocab:
            entries.append(safe_unicode(emsc_vocab.getTerm(md["emsc"]).title) or u"")
        if md["gcm"] in gcm_vocab:
            entries.append(safe_unicode(gcm_vocab.getTerm(md["gcm"]).title) or u"")
        entries.append(year)
        entries.append(month)
    elif md.get("genre") == "DataGenreCC":
        entries.append(u"current")
    return u" ".join(entries)