This article collects typical usage examples of the Python method HydraServer.db.DBSession.query. If you have been wondering what DBSession.query does, how to call it, or simply want to see it used in real code, the curated examples below should help. You can also explore further usage examples of the containing class, HydraServer.db.DBSession.
The following shows 15 code examples of the DBSession.query method, sorted by popularity by default. You can vote for the examples you find useful, which helps the system recommend better Python code samples.
Example 1: _get_datasets
# Required import: from HydraServer.db import DBSession [as alias]
# Or alternatively: from HydraServer.db.DBSession import query [as alias]
def _get_datasets(dataset_ids):
    """
    Get all the datasets in a list of dataset IDs. This must be done in chunks of 999,
    as sqlite can only handle 'in' with < 1000 elements.
    """
    dataset_dict = {}
    datasets = []
    if len(dataset_ids) > qry_in_threshold:
        idx = 0
        extent = qry_in_threshold
        while idx < len(dataset_ids):
            log.info("Querying %s datasets", len(dataset_ids[idx:extent]))
            rs = DBSession.query(Dataset).filter(Dataset.dataset_id.in_(dataset_ids[idx:extent])).all()
            datasets.extend(rs)
            idx = idx + qry_in_threshold
            if idx + qry_in_threshold > len(dataset_ids):
                extent = len(dataset_ids)
            else:
                extent = extent + qry_in_threshold
    else:
        datasets = DBSession.query(Dataset).filter(Dataset.dataset_id.in_(dataset_ids))

    for r in datasets:
        dataset_dict[r.dataset_id] = r

    log.info("Retrieved %s datasets", len(dataset_dict))

    return dataset_dict
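The chunking loop above keeps every IN clause below SQLite's roughly 1000-element limit. As a minimal, generic sketch of the same pattern against plain SQLAlchemy (the helper name, the session/model arguments and the chunk size are illustrative assumptions, not part of HydraServer):

def query_in_chunks(session, model, column, values, chunk_size=999):
    """Fetch all `model` rows whose `column` value is in `values`,
    issuing one query per chunk of at most `chunk_size` elements."""
    results = []
    for start in range(0, len(values), chunk_size):
        chunk = values[start:start + chunk_size]
        results.extend(session.query(model).filter(column.in_(chunk)).all())
    return results

# e.g. query_in_chunks(DBSession, Dataset, Dataset.dataset_id, dataset_ids)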
Example 2: get_attribute_data
# Required import: from HydraServer.db import DBSession [as alias]
# Or alternatively: from HydraServer.db.DBSession import query [as alias]
def get_attribute_data(attr_ids, node_ids, **kwargs):
    """
    For a given attribute or set of attributes, return all the resources and
    resource scenarios in the network
    """
    node_attrs = DBSession.query(ResourceAttr).\
            options(joinedload_all('attr')).\
            filter(ResourceAttr.node_id.in_(node_ids),
                   ResourceAttr.attr_id.in_(attr_ids)).all()

    ra_ids = []
    for ra in node_attrs:
        ra_ids.append(ra.resource_attr_id)

    resource_scenarios = DBSession.query(ResourceScenario).filter(ResourceScenario.resource_attr_id.in_(ra_ids)).options(joinedload('resourceattr')).options(joinedload_all('dataset.metadata')).order_by(ResourceScenario.scenario_id).all()

    for rs in resource_scenarios:
        if rs.dataset.hidden == 'Y':
            try:
                rs.dataset.check_read_permission(kwargs.get('user_id'))
            except:
                rs.dataset.value = None
                rs.dataset.frequency = None
                rs.dataset.start_time = None
        DBSession.expunge(rs)

    return node_attrs, resource_scenarios
Example 3: _get_existing_data
# Required import: from HydraServer.db import DBSession [as alias]
# Or alternatively: from HydraServer.db.DBSession import query [as alias]
def _get_existing_data(hashes):

    str_hashes = [str(h) for h in hashes]

    hash_dict = {}
    datasets = []
    if len(str_hashes) > qry_in_threshold:
        idx = 0
        extent = qry_in_threshold
        while idx < len(str_hashes):
            log.info("Querying %s datasets", len(str_hashes[idx:extent]))
            rs = DBSession.query(Dataset).filter(Dataset.data_hash.in_(str_hashes[idx:extent])).all()
            datasets.extend(rs)
            idx = idx + qry_in_threshold
            if idx + qry_in_threshold > len(str_hashes):
                extent = len(str_hashes)
            else:
                extent = extent + qry_in_threshold
    else:
        datasets = DBSession.query(Dataset).filter(Dataset.data_hash.in_(str_hashes))

    for r in datasets:
        hash_dict[r.data_hash] = r

    log.info("Retrieved %s datasets", len(hash_dict))

    return hash_dict
Example 4: get_resource_attributes
# Required import: from HydraServer.db import DBSession [as alias]
# Or alternatively: from HydraServer.db.DBSession import query [as alias]
def get_resource_attributes(ref_key, ref_id, type_id=None, **kwargs):
    """
    Get all the resource attributes for a given resource.
    If type_id is specified, only
    return the resource attributes within the type.
    """
    user_id = kwargs.get('user_id')

    resource_attr_qry = DBSession.query(ResourceAttr).filter(
        ResourceAttr.ref_key == ref_key,
        or_(
            ResourceAttr.network_id==ref_id,
            ResourceAttr.node_id==ref_id,
            ResourceAttr.link_id==ref_id,
            ResourceAttr.group_id==ref_id
        ))

    if type_id is not None:
        attr_ids = []
        rs = DBSession.query(TypeAttr).filter(TypeAttr.type_id==type_id).all()
        for r in rs:
            attr_ids.append(r.attr_id)

        resource_attr_qry = resource_attr_qry.filter(ResourceAttr.attr_id.in_(attr_ids))

    resource_attrs = resource_attr_qry.all()

    return resource_attrs
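A hypothetical call, assuming a node with typed attributes already exists in the database (the ref_key string and the numeric IDs below are purely illustrative):

node_attrs = get_resource_attributes('NODE', 123, type_id=7, user_id=1)
attr_ids = [ra.attr_id for ra in node_attrs]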
Example 5: _get_metadata
# Required import: from HydraServer.db import DBSession [as alias]
# Or alternatively: from HydraServer.db.DBSession import query [as alias]
def _get_metadata(dataset_ids):
    """
    Get all the metadata for a given list of datasets
    """
    metadata = []
    if len(dataset_ids) == 0:
        return []

    if len(dataset_ids) > qry_in_threshold:
        idx = 0
        extent = qry_in_threshold
        while idx < len(dataset_ids):
            log.info("Querying %s metadatas", len(dataset_ids[idx:extent]))
            rs = DBSession.query(Metadata).filter(Metadata.dataset_id.in_(dataset_ids[idx:extent])).all()
            metadata.extend(rs)
            idx = idx + qry_in_threshold
            if idx + qry_in_threshold > len(dataset_ids):
                extent = len(dataset_ids)
            else:
                extent = extent + qry_in_threshold
    else:
        metadata_qry = DBSession.query(Metadata).filter(Metadata.dataset_id.in_(dataset_ids))
        for m in metadata_qry:
            metadata.append(m)

    return metadata
Example 6: update_value_from_mapping
# Required import: from HydraServer.db import DBSession [as alias]
# Or alternatively: from HydraServer.db.DBSession import query [as alias]
def update_value_from_mapping(source_resource_attr_id, target_resource_attr_id, source_scenario_id, target_scenario_id, **kwargs):
    """
    Using a resource attribute mapping, take the value from the source and apply
    it to the target. Both source and target scenarios must be specified (and therefore
    must exist).
    """
    rm = aliased(ResourceAttrMap, name='rm')
    #Check the mapping exists.
    mapping = DBSession.query(rm).filter(
        or_(
            and_(
                rm.resource_attr_id_a == source_resource_attr_id,
                rm.resource_attr_id_b == target_resource_attr_id
            ),
            and_(
                rm.resource_attr_id_a == target_resource_attr_id,
                rm.resource_attr_id_b == source_resource_attr_id
            )
        )
    ).first()

    if mapping is None:
        raise ResourceNotFoundError("Mapping between %s and %s not found"%
                                    (source_resource_attr_id,
                                     target_resource_attr_id))

    #Check the scenarios exist.
    s1 = _get_scenario(source_scenario_id, False, False)
    s2 = _get_scenario(target_scenario_id, False, False)

    rs = aliased(ResourceScenario, name='rs')
    rs1 = DBSession.query(rs).filter(rs.resource_attr_id == source_resource_attr_id,
                                     rs.scenario_id == source_scenario_id).first()
    rs2 = DBSession.query(rs).filter(rs.resource_attr_id == target_resource_attr_id,
                                     rs.scenario_id == target_scenario_id).first()

    #Three possibilities worth considering:
    #1: Both RS exist, so update the target RS
    #2: Target RS does not exist, so create it with the dataset from RS1
    #3: Source RS does not exist, so it must be removed from the target scenario if it exists

    return_value = None  # Either return None or return a new or updated resource scenario
    if rs1 is not None:
        if rs2 is not None:
            log.info("Destination Resource Scenario exists. Updating dataset ID")
            rs2.dataset_id = rs1.dataset_id
        else:
            log.info("Destination has no data, so making a new Resource Scenario")
            rs2 = ResourceScenario(resource_attr_id=target_resource_attr_id, scenario_id=target_scenario_id, dataset_id=rs1.dataset_id)
            DBSession.add(rs2)
            DBSession.flush()
        return_value = rs2
    else:
        log.info("Source Resource Scenario does not exist. Deleting destination Resource Scenario")
        if rs2 is not None:
            DBSession.delete(rs2)
            DBSession.flush()

    return return_value
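A hedged usage sketch, assuming a mapping between the two resource attributes and both scenarios already exist (all IDs below are illustrative):

new_or_updated_rs = update_value_from_mapping(source_resource_attr_id=10,
                                              target_resource_attr_id=11,
                                              source_scenario_id=1,
                                              target_scenario_id=2,
                                              user_id=1)
if new_or_updated_rs is not None:
    log.info("Target now uses dataset %s", new_or_updated_rs.dataset_id)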
Example 7: update_dataset
# Required import: from HydraServer.db import DBSession [as alias]
# Or alternatively: from HydraServer.db.DBSession import query [as alias]
def update_dataset(dataset_id, name, data_type, val, units, dimension, metadata={}, **kwargs):
    """
    Update an existing dataset
    """
    if dataset_id is None:
        raise HydraError("Dataset must have an ID to be updated.")

    user_id = kwargs.get('user_id')

    dataset = DBSession.query(Dataset).filter(Dataset.dataset_id==dataset_id).one()
    #This dataset has been seen before, so it may be attached
    #to other scenarios, which may be locked. If they are locked, we must
    #not change their data, so new data must be created for the unlocked scenarios
    locked_scenarios = []
    unlocked_scenarios = []
    for dataset_rs in dataset.resourcescenarios:
        if dataset_rs.scenario.locked == 'Y':
            locked_scenarios.append(dataset_rs)
        else:
            unlocked_scenarios.append(dataset_rs)

    #Are any of these scenarios locked?
    if len(locked_scenarios) > 0:
        #If so, create a new dataset and assign it to the resource scenarios of all unlocked scenarios.
        dataset = add_dataset(data_type,
                              val,
                              units,
                              dimension,
                              metadata=metadata,
                              name=name,
                              user_id=kwargs['user_id'])

        for unlocked_rs in unlocked_scenarios:
            unlocked_rs.dataset = dataset
    else:
        dataset.set_val(data_type, val)
        dataset.set_metadata(metadata)
        dataset.data_type = data_type
        dataset.data_units = units
        dataset.data_name = name
        dataset.data_dimen = dimension
        dataset.created_by = kwargs['user_id']
        dataset.data_hash = dataset.set_hash()

        #Is there a dataset in the DB already which is identical to the updated dataset?
        existing_dataset = DBSession.query(Dataset).filter(Dataset.data_hash==dataset.data_hash, Dataset.dataset_id != dataset.dataset_id).first()
        if existing_dataset is not None and existing_dataset.check_user(user_id):
            log.warn("An identical dataset %s has been found to dataset %s."
                     " Deleting dataset and returning dataset %s",
                     existing_dataset.dataset_id, dataset.dataset_id, existing_dataset.dataset_id)
            DBSession.delete(dataset)
            dataset = existing_dataset

    return dataset
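A hedged usage sketch (all argument values are illustrative; if any scenario using dataset 42 is locked, the dataset returned will be a newly created one rather than dataset 42 itself):

ds = update_dataset(dataset_id=42,
                    name='Updated flow',
                    data_type='scalar',
                    val='1.5',
                    units='m^3 s^-1',
                    dimension='Volumetric flow rate',
                    metadata={},
                    user_id=1)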
Example 8: convert_dataset
# Required import: from HydraServer.db import DBSession [as alias]
# Or alternatively: from HydraServer.db.DBSession import query [as alias]
def convert_dataset(dataset_id, to_unit, **kwargs):
    """Convert a whole dataset (specified by 'dataset_id') to a new unit
    ('to_unit'). Conversion ALWAYS creates a NEW dataset, so the function
    returns the dataset ID of the new dataset.
    """
    ds_i = DBSession.query(Dataset).filter(Dataset.dataset_id==dataset_id).one()

    dataset_type = ds_i.data_type

    dsval = ds_i.get_val()
    old_unit = ds_i.data_units

    if old_unit is not None:
        if dataset_type == 'scalar':
            new_val = hydra_units.convert(float(dsval), old_unit, to_unit)
        elif dataset_type == 'array':
            dim = array_dim(dsval)
            vecdata = arr_to_vector(dsval)
            newvec = hydra_units.convert(vecdata, old_unit, to_unit)
            new_val = vector_to_arr(newvec, dim)
        elif dataset_type == 'timeseries':
            new_val = []
            for ts_time, ts_val in dsval.items():
                dim = array_dim(ts_val)
                vecdata = arr_to_vector(ts_val)
                newvec = hydra_units.convert(vecdata, old_unit, to_unit)
                newarr = vector_to_arr(newvec, dim)
                new_val.append((ts_time, newarr))
        elif dataset_type == 'descriptor':
            raise HydraError('Cannot convert descriptor.')

        new_dataset = Dataset()
        new_dataset.data_units = to_unit
        new_dataset.set_val(dataset_type, new_val)
        new_dataset.data_dimen = ds_i.data_dimen
        new_dataset.data_name = ds_i.data_name
        new_dataset.data_type = ds_i.data_type
        new_dataset.hidden = 'N'
        new_dataset.set_metadata(ds_i.get_metadata_as_dict())
        new_dataset.set_hash()

        existing_ds = DBSession.query(Dataset).filter(Dataset.data_hash==new_dataset.data_hash).first()

        if existing_ds is not None:
            DBSession.expunge_all()
            return existing_ds.dataset_id

        DBSession.add(new_dataset)
        DBSession.flush()

        return new_dataset.dataset_id
    else:
        raise HydraError('Dataset has no units.')
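A hedged usage sketch (the dataset ID and target unit string are illustrative; the target unit is assumed to share the dataset's dimension):

new_dataset_id = convert_dataset(42, 'cm', user_id=1)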
Example 9: clone_dataset
# Required import: from HydraServer.db import DBSession [as alias]
# Or alternatively: from HydraServer.db.DBSession import query [as alias]
def clone_dataset(dataset_id, **kwargs):
    """
    Clone a single dataset, identified by its ID
    """
    user_id = int(kwargs.get('user_id'))

    if dataset_id is None:
        return None

    dataset = DBSession.query(Dataset).filter(
        Dataset.dataset_id==dataset_id).options(joinedload_all('metadata')).first()

    if dataset is None:
        raise HydraError("Dataset %s does not exist."%(dataset_id))

    if dataset is not None and dataset.created_by != user_id:
        owner = DBSession.query(DatasetOwner).filter(
            DatasetOwner.dataset_id==Dataset.dataset_id,
            DatasetOwner.user_id==user_id).first()
        if owner is None:
            raise PermissionError("User %s is not an owner of dataset %s and therefore cannot clone it."%(user_id, dataset_id))

    DBSession.expunge(dataset)

    make_transient(dataset)

    dataset.data_name = dataset.data_name + "(Clone)"
    dataset.dataset_id = None
    dataset.cr_date = None

    #Try to avoid duplicate metadata entries if the entry has been cloned previously
    for m in dataset.metadata:
        if m.metadata_name in ("clone_of", "cloned_by"):
            del(m)

    cloned_meta = Metadata()
    cloned_meta.metadata_name = "clone_of"
    cloned_meta.metadata_val = str(dataset_id)
    dataset.metadata.append(cloned_meta)

    cloned_meta = Metadata()
    cloned_meta.metadata_name = "cloned_by"
    cloned_meta.metadata_val = str(user_id)
    dataset.metadata.append(cloned_meta)

    dataset.set_hash()
    DBSession.add(dataset)
    DBSession.flush()

    cloned_dataset = DBSession.query(Dataset).filter(
        Dataset.dataset_id==dataset.dataset_id).first()

    return cloned_dataset
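A hedged usage sketch (the ID is illustrative; the caller is assumed to be the dataset's creator or one of its owners):

clone = clone_dataset(42, user_id=1)
log.info("Created clone %s (%s)", clone.dataset_id, clone.data_name)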
Example 10: get_resource_data
# Required import: from HydraServer.db import DBSession [as alias]
# Or alternatively: from HydraServer.db.DBSession import query [as alias]
def get_resource_data(ref_key, ref_id, scenario_id, type_id, **kwargs):
    """
    Get all the resource scenarios for a given resource
    in a given scenario. If type_id is specified, only
    return the resource scenarios for the attributes
    within the type.
    """
    user_id = kwargs.get('user_id')

    #This can be either a single ID or a list, so make it consistent
    if not isinstance(scenario_id, list):
        scenario_id = [scenario_id]

    resource_data_qry = DBSession.query(ResourceScenario).filter(
        ResourceScenario.dataset_id == Dataset.dataset_id,
        ResourceAttr.resource_attr_id == ResourceScenario.resource_attr_id,
        ResourceScenario.scenario_id.in_(scenario_id),
        ResourceAttr.ref_key == ref_key,
        or_(
            ResourceAttr.network_id==ref_id,
            ResourceAttr.node_id==ref_id,
            ResourceAttr.link_id==ref_id,
            ResourceAttr.group_id==ref_id
        )).distinct().options(joinedload('resourceattr')).options(joinedload_all('dataset.metadata'))

    if type_id is not None:
        attr_ids = []
        rs = DBSession.query(TypeAttr).filter(TypeAttr.type_id==type_id).all()
        for r in rs:
            attr_ids.append(r.attr_id)

        resource_data_qry = resource_data_qry.filter(ResourceAttr.attr_id.in_(attr_ids))

    resource_data = resource_data_qry.all()

    for rs in resource_data:
        try:
            rs.dataset.value = zlib.decompress(rs.dataset.value)
        except zlib.error:
            pass

        if rs.dataset.hidden == 'Y':
            try:
                rs.dataset.check_read_permission(user_id)
            except:
                rs.dataset.value = None
                rs.dataset.frequency = None
                rs.dataset.start_time = None

    DBSession.expunge_all()
    return resource_data
Example 11: get_datasets
# Required import: from HydraServer.db import DBSession [as alias]
# Or alternatively: from HydraServer.db.DBSession import query [as alias]
def get_datasets(dataset_ids, **kwargs):
    """
    Get multiple datasets, by their IDs
    """
    user_id = int(kwargs.get('user_id'))

    datasets = []
    if len(dataset_ids) == 0:
        return []
    try:
        dataset_rs = DBSession.query(Dataset.dataset_id,
                Dataset.data_type,
                Dataset.data_units,
                Dataset.data_dimen,
                Dataset.data_name,
                Dataset.hidden,
                Dataset.cr_date,
                Dataset.created_by,
                DatasetOwner.user_id,
                null().label('metadata'),
                case([(and_(Dataset.hidden=='Y', DatasetOwner.user_id is not None), None)],
                     else_=Dataset.start_time).label('start_time'),
                case([(and_(Dataset.hidden=='Y', DatasetOwner.user_id is not None), None)],
                     else_=Dataset.frequency).label('frequency'),
                case([(and_(Dataset.hidden=='Y', DatasetOwner.user_id is not None), None)],
                     else_=Dataset.value).label('value')).filter(
                Dataset.dataset_id.in_(dataset_ids)).outerjoin(DatasetOwner,
                    and_(DatasetOwner.dataset_id==Dataset.dataset_id,
                         DatasetOwner.user_id==user_id)).all()

        #Convert the value column into a string, as it is returned as binary
        for dataset_row in dataset_rs:
            dataset_dict = dataset_row._asdict()

            if dataset_row.value is not None:
                dataset_dict['value'] = str(dataset_row.value)

            if dataset_row.hidden == 'N' or (dataset_row.hidden == 'Y' and dataset_row.user_id is not None):
                metadata = DBSession.query(Metadata).filter(Metadata.dataset_id == dataset_row.dataset_id).all()
                dataset_dict['metadata'] = metadata
            else:
                dataset_dict['metadata'] = []

            datasets.append(namedtuple('Dataset', dataset_dict.keys())(**dataset_dict))
    except NoResultFound:
        raise ResourceNotFoundError("Datasets not found.")

    return datasets
Example 12: get_dataset
# Required import: from HydraServer.db import DBSession [as alias]
# Or alternatively: from HydraServer.db.DBSession import query [as alias]
def get_dataset(dataset_id, **kwargs):
    """
    Get a single dataset, by ID
    """
    user_id = int(kwargs.get('user_id'))

    if dataset_id is None:
        return None
    try:
        dataset_rs = DBSession.query(Dataset.dataset_id,
                Dataset.data_type,
                Dataset.data_units,
                Dataset.data_dimen,
                Dataset.data_name,
                Dataset.hidden,
                Dataset.cr_date,
                Dataset.created_by,
                DatasetOwner.user_id,
                null().label('metadata'),
                case([(and_(Dataset.hidden=='Y', DatasetOwner.user_id is not None), None)],
                     else_=Dataset.start_time).label('start_time'),
                case([(and_(Dataset.hidden=='Y', DatasetOwner.user_id is not None), None)],
                     else_=Dataset.frequency).label('frequency'),
                case([(and_(Dataset.hidden=='Y', DatasetOwner.user_id is not None), None)],
                     else_=Dataset.value).label('value')).filter(
                Dataset.dataset_id==dataset_id).outerjoin(DatasetOwner,
                    and_(DatasetOwner.dataset_id==Dataset.dataset_id,
                         DatasetOwner.user_id==user_id)).one()

        rs_dict = dataset_rs._asdict()

        #Convert the value column into a string, as it is returned as binary
        if dataset_rs.value is not None:
            rs_dict['value'] = str(dataset_rs.value)

        if dataset_rs.hidden == 'N' or (dataset_rs.hidden == 'Y' and dataset_rs.user_id is not None):
            metadata = DBSession.query(Metadata).filter(Metadata.dataset_id==dataset_id).all()
            rs_dict['metadata'] = metadata
        else:
            rs_dict['metadata'] = []

    except NoResultFound:
        raise HydraError("Dataset %s does not exist."%(dataset_id))

    dataset = namedtuple('Dataset', rs_dict.keys())(**rs_dict)

    return dataset
Example 13: delete_dataset
# Required import: from HydraServer.db import DBSession [as alias]
# Or alternatively: from HydraServer.db.DBSession import query [as alias]
def delete_dataset(dataset_id, **kwargs):
    """
    Removes a piece of data from the DB.
    CAUTION! Use with care, as this cannot be undone easily.
    """
    try:
        d = DBSession.query(Dataset).filter(Dataset.dataset_id==dataset_id).one()
    except NoResultFound:
        raise HydraError("Dataset %s does not exist."%dataset_id)

    dataset_rs = DBSession.query(ResourceScenario).filter(ResourceScenario.dataset_id==dataset_id).all()
    if len(dataset_rs) > 0:
        raise HydraError("Cannot delete %s. Dataset is used by resource scenarios."%dataset_id)

    DBSession.delete(d)
    DBSession.flush()
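A hedged usage sketch showing the error path (the ID is illustrative):

try:
    delete_dataset(42, user_id=1)
except HydraError as e:
    # Raised when the dataset does not exist or is still used by resource scenarios
    log.warn("Could not delete dataset 42: %s", e)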
Example 14: add_resource_attribute
# Required import: from HydraServer.db import DBSession [as alias]
# Or alternatively: from HydraServer.db.DBSession import query [as alias]
def add_resource_attribute(resource_type, resource_id, attr_id, is_var, **kwargs):
    """
    Add a resource attribute to a resource.
    is_var indicates whether the attribute is a variable or not --
    this is used in simulation to indicate that this value is expected
    to be filled in by the simulator.
    """
    attr = DBSession.query(Attr).filter(Attr.attr_id==attr_id).first()

    if attr is None:
        raise ResourceNotFoundError("Attribute with ID %s does not exist."%attr_id)

    resource_i = _get_resource(resource_type, resource_id)

    for ra in resource_i.attributes:
        if ra.attr_id == attr_id:
            raise HydraError("Duplicate attribute. %s %s already has attribute %s"
                             %(resource_type, resource_i.get_name(), attr.attr_name))

    attr_is_var = 'Y' if is_var else 'N'

    new_ra = resource_i.add_attribute(attr_id, attr_is_var)
    DBSession.flush()

    return new_ra
Example 15: get_link_mappings
# Required import: from HydraServer.db import DBSession [as alias]
# Or alternatively: from HydraServer.db.DBSession import query [as alias]
def get_link_mappings(link_id, link_2_id=None, **kwargs):
    """
    Get all the resource attribute mappings for a given link. If a second link
    is specified, only return the mappings between the two links.
    """
    qry = DBSession.query(ResourceAttrMap).filter(
        or_(
            and_(
                ResourceAttrMap.resource_attr_id_a == ResourceAttr.resource_attr_id,
                ResourceAttr.link_id == link_id),
            and_(
                ResourceAttrMap.resource_attr_id_b == ResourceAttr.resource_attr_id,
                ResourceAttr.link_id == link_id)))

    if link_2_id is not None:
        aliased_ra = aliased(ResourceAttr, name="ra2")
        qry = qry.filter(or_(
            and_(
                ResourceAttrMap.resource_attr_id_a == aliased_ra.resource_attr_id,
                aliased_ra.link_id == link_2_id),
            and_(
                ResourceAttrMap.resource_attr_id_b == aliased_ra.resource_attr_id,
                aliased_ra.link_id == link_2_id)))

    return qry.all()
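A hedged usage sketch (the link IDs are illustrative):

mappings = get_link_mappings(link_id=5, link_2_id=6, user_id=1)
for m in mappings:
    log.info("%s is mapped to %s", m.resource_attr_id_a, m.resource_attr_id_b)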