This article collects typical usage examples of the Python class opus_core.variables.variable_name.VariableName. If you are unsure what VariableName does or how to use it, the curated examples below may help.
The following 15 code examples show VariableName in use, ordered roughly by popularity.
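Before the examples, here is a minimal sketch of the VariableName accessors that the samples below exercise. The expected values in the comments are copied from the assertions in Example 1 and Example 12; the snippet itself is illustrative rather than taken from the opus_core sources.

from opus_core.variables.variable_name import VariableName

# A fully-qualified variable has the form "package.dataset.short_name" (see Example 12).
name = VariableName("opus_core.test_agent.income_times_2")
name.get_package_name()   # 'opus_core'
name.get_dataset_name()   # 'test_agent'
name.get_short_name()     # 'income_times_2'
name.get_alias()          # 'income_times_2'
name.get_autogen_class()  # None -- only genuine expressions get an autogenerated variable class

# An expression such as "test_agent.age<=opus_constant.young_age" (Example 1) parses differently:
expr_name = VariableName("test_agent.age<=opus_constant.young_age")
expr_name.get_package_name()   # None
expr_name.get_dataset_name()   # 'test_agent'
expr_name.get_autogen_class()  # an autogenerated variable class, not None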
Example 1: test_constants
def test_constants(self):
    # test an expression involving two dataset names, one of which is *_constant
    expr = "test_agent.age<=opus_constant.young_age"
    storage = StorageFactory().get_storage('dict_storage')
    storage.write_table(
        table_name='test_agents',
        table_data={
            "age": array([30, 20, 60, 80]),
            "id": array([1, 3, 4, 10])
            }
        )
    storage.write_table(
        table_name='opus_constants',
        table_data={
            "young_age": array([35]),
            "opus_constant_id": array([1])
            }
        )
    dataset_pool = DatasetPool(storage=storage)
    # Test that the dataset name is correct for expr. It should be test_agent -- opus_constant just holds constants,
    # and is ignored as far as finding the dataset name for the expression.
    name = VariableName(expr)
    autogen = name.get_autogen_class()
    self.assertEqual(name.get_package_name(), None)
    self.assertEqual(name.get_dataset_name(), 'test_agent')
    # make an instance of the class and check the dependencies (it shouldn't depend on urbansim_constant)
    self.assertEqual(autogen().dependencies(), ['test_agent.age'])
    dataset = Dataset(in_storage=storage, in_table_name='test_agents', id_name="id", dataset_name="test_agent")
    result = dataset.compute_variables([expr], dataset_pool=dataset_pool)
    should_be = array([True, True, False, False])
    self.assertEqual(ma.allequal(result, should_be), True)
Example 2: test_multiply
def test_multiply(self):
    expr = 'test_agent.income*test_location.cost'
    storage = StorageFactory().get_storage('dict_storage')
    storage.write_table(
        table_name='test_agents',
        table_data={'id': array([1, 2, 3]), 'income': array([1, 20, 500])}
        )
    storage.write_table(
        table_name='test_locations',
        table_data={'id': array([1, 2]), 'cost': array([1000, 2000])}
        )
    dataset_pool = DatasetPool(package_order=['opus_core'], storage=storage)
    test_agent_x_test_location = dataset_pool.get_dataset('test_agent_x_test_location')
    result = test_agent_x_test_location.compute_variables(expr, dataset_pool=dataset_pool)
    should_be = array([[1000, 2000],
                       [20000, 40000],
                       [500000, 1000000]])
    self.assert_(ma.allclose(result, should_be, rtol=1e-6), msg="Error in " + expr)
    name = VariableName(expr)
    # since the expression involves both test_agent and test_location, the dataset name should be None
    # and the interaction set names should be (test_agent, test_location) or (test_location, test_agent)
    self.assertEqual(name.get_dataset_name(), None)
    names = name.get_interaction_set_names()
    self.assertEqual(len(names), 2)
    self.assert_('test_agent' in names)
    self.assert_('test_location' in names)
Example 3: _do_flush_dependent_variables_if_required
def _do_flush_dependent_variables_if_required(self):
    try:
        if not SessionConfiguration().get('flush_variables', False):
            return
    except:
        return
    from opus_core.datasets.interaction_dataset import InteractionDataset
    dataset = self.get_dataset()
    dependencies = self.get_current_dependencies()
    my_dataset_name = dataset.get_dataset_name()
    for iattr in range(len(dependencies)):  # iterate over dependent variables
        dep_item = dependencies[iattr][0]
        if isinstance(dep_item, str):
            depvar_name = VariableName(dep_item)
        else:
            depvar_name = dep_item.get_variable_name()  # dep_item should be an instance of AttributeBox
        dataset_name = depvar_name.get_dataset_name()
        if dataset_name == my_dataset_name:
            ds = dataset
        else:
            ds = SessionConfiguration().get_dataset_from_pool(dataset_name)
            #ds = dataset_pool.get_dataset('dataset_name')
        if not isinstance(ds, InteractionDataset):
            short_name = depvar_name.get_alias()
            if short_name not in ds.get_id_name():
                ds.flush_attribute(depvar_name)
Example 4: check_parse_errors
def check_parse_errors(self, variables):
    # check the variables in the expression library as indexed by the list 'variables'.
    errors = []
    for (var_name, dataset_name, use, source, expr) in variables:
        # special case -- the 'constant' expression always passes
        if expr.strip() == 'constant' and var_name == 'constant':
            continue
        try:
            n = VariableName(expr)
            # check that the expression is of the correct form given the source
            if source == 'primary attribute':
                if n.get_autogen_class() is not None:
                    errors.append("Error - this is parsing as an expression rather than as a primary attribute: (%s, %s): %s" % (var_name, dataset_name, expr))
                elif n.get_dataset_name() is None:
                    errors.append("Error in primary attribute - missing dataset name: (%s, %s): %s" % (var_name, dataset_name, expr))
                elif dataset_name != n.get_dataset_name():
                    errors.append("Error in primary attribute - dataset name mismatch: (%s, %s): %s" % (var_name, dataset_name, expr))
                elif n.get_package_name() is not None:
                    errors.append("Error in primary attribute - shouldn't have package name: (%s, %s): %s" % (var_name, dataset_name, expr))
            elif source == 'expression':
                if n.get_autogen_class() is None:
                    errors.append("Error - this doesn't seem to be an expression. Maybe it should be a Python class or primary attribute?: (%s, %s): %s" % (var_name, dataset_name, expr))
            elif source == 'Python class':
                if n.get_autogen_class() is not None:
                    errors.append("Error - this is parsing as an expression rather than as a Python class reference: (%s, %s): %s" % (var_name, dataset_name, expr))
                elif n.get_package_name() is None:
                    errors.append("Error - missing package name in Python class reference: (%s, %s): %s" % (var_name, dataset_name, expr))
                elif n.get_dataset_name() is None:
                    errors.append("Error - missing dataset name in Python class reference: (%s, %s): %s" % (var_name, dataset_name, expr))
                elif dataset_name != n.get_dataset_name():
                    errors.append("Error - dataset name mismatch in Python class reference: (%s, %s): %s" % (var_name, dataset_name, expr))
            else:
                errors.append("Unknown source type %s: (%s, %s): %s" % (source, var_name, dataset_name, expr))
        except (SyntaxError, ValueError), e:
            errors.append("Parsing error: (%s, %s): %s" % (var_name, dataset_name, str(e)))
Example 5: _compute_if_needed
def _compute_if_needed(self, name, dataset_pool, resources=None, quiet=False, version=None):
    """ Compute variable given by the argument 'name' only if this variable
    has not been computed before.
    Check first if this variable belongs to dataset1 or dataset2.
    dataset_pool holds available datasets.
    """
    if not isinstance(name, VariableName):
        variable_name = VariableName(name)
    else:
        variable_name = name
    short_name = variable_name.get_alias()
    dataset_name = variable_name.get_dataset_name()
    if dataset_name == self.get_dataset_name():
        new_version = UrbansimDataset._compute_if_needed(self, variable_name, dataset_pool, resources, quiet=quiet, version=version)
    else:
        if dataset_name == self.dataset1.get_dataset_name():
            owner_dataset = self.dataset1
            # index = self.get_2d_index_of_dataset1()
        elif dataset_name == self.dataset2.get_dataset_name():
            owner_dataset = self.dataset2
            # index = self.get_2d_index()
        else:
            self._raise_error(StandardError, "Cannot find variable '%s'\nin either dataset or in the interaction set." %
                              variable_name.get_expression())
        owner_dataset.compute_variables([variable_name], dataset_pool, resources=resources, quiet=True)
        new_version = self.compute_variables_return_versions_and_final_value("%s = %s.disaggregate(%s.%s)" % \
            (short_name, self.get_dataset_name(), owner_dataset.get_dataset_name(), short_name),
            dataset_pool=dataset_pool, resources=resources, quiet=quiet)[0]
    return new_version
Example 6: compute_expression
def compute_expression(self, attribute_name):
    """Compute any expression and return its values."""
    var_name = VariableName(attribute_name)
    dataset_name = var_name.get_dataset_name()
    ds = self.get_dataset(dataset_name)
    return ds.compute_variables([var_name],
                                dataset_pool=self.get_dataset_pool())
Example 7: _compute_if_needed
def _compute_if_needed(self, name, dataset_pool, resources=None, quiet=False, version=None):
    """ Compute variable given by the argument 'name' only if this variable
    has not been computed before.
    Check first if this variable belongs to dataset1 or dataset2.
    dataset_pool holds available datasets.
    """
    if not isinstance(name, VariableName):
        variable_name = VariableName(name)
    else:
        variable_name = name
    short_name = variable_name.get_alias()
    if (short_name in self.get_attribute_names()) and (self.are_dependent_variables_up_to_date(
            variable_name, version=version)):
        return version  # nothing to be done
    dataset_name = variable_name.get_dataset_name()
    if dataset_name == self.get_dataset_name():
        new_version = self._compute_one_variable(variable_name, dataset_pool, resources)
    else:
        owner_dataset, index = self.get_owner_dataset_and_index(dataset_name)
        if owner_dataset is None:
            self._raise_error(StandardError, "Cannot find variable '%s'\nin either dataset or in the interaction set." %
                              variable_name.get_expression())
        owner_dataset.compute_variables([variable_name], dataset_pool, resources=resources, quiet=True)
        new_version = self.add_attribute(data=owner_dataset.get_attribute_by_index(variable_name, index),
                                         name=variable_name, metadata=AttributeType.COMPUTED)
        attribute_box = owner_dataset._get_attribute_box(variable_name)
        variable = attribute_box.get_variable_instance()
        my_attribute_box = self._get_attribute_box(variable_name)
        my_attribute_box.set_variable_instance(variable)
    return new_version
Example 8: run
def run(self, year, condition=None, max_iter=10):
    """
    'year' is the current year of the simulation.
    'condition' should be a boolean expression defined on any dataset.
    The method iterates over the given models until all values of the expression are True.
    'max_iter' gives the maximum number of iterations to run, if 'condition' is not fulfilled.
    If it is None, there is no limit and thus, the condition must be fulfilled in order to terminate.
    If 'condition' is None, the set of models is run only once.
    """
    self.config['years'] = (year, year)
    if condition is None:
        return self.model_system.run_in_same_process(self.config)
    dataset_pool = SessionConfiguration().get_dataset_pool()
    variable_name = VariableName(condition)
    dataset = dataset_pool.get_dataset(variable_name.get_dataset_name())
    condition_value = dataset.compute_variables(variable_name, dataset_pool=dataset_pool)
    result = None
    iter = 1
    while not alltrue(condition_value):
        result = self.model_system.run_in_same_process(self.config)
        if max_iter is None or iter > max_iter:
            break
        iter = iter + 1
        # force to recompute the condition
        dataset = SessionConfiguration().get_dataset_pool().get_dataset(variable_name.get_dataset_name())
        dataset.delete_computed_attributes()
        condition_value = dataset.compute_variables(variable_name,
                                                    dataset_pool=SessionConfiguration().get_dataset_pool())
    if not alltrue(condition_value):
        logger.log_status('%s did not converge. Maximum number of iterations (%s) reached.' % (self.model_name, max_iter))
    else:
        logger.log_status('%s converged in %s iterations.' % (self.model_name, iter-1))
    return result
Example 9: test_interaction_set_component
def test_interaction_set_component(self):
    # test a fully-qualified variable that applies to a component of an interaction set
    expr = "opus_core.test_agent.income_times_2"
    storage = StorageFactory().get_storage('dict_storage')
    storage.write_table(
        table_name='test_agents',
        table_data={'id': array([1, 2, 3]), 'income': array([1, 20, 500])}
        )
    storage.write_table(
        table_name='test_locations',
        table_data={'id': array([1, 2]), 'cost': array([1000, 2000])}
        )
    dataset_pool = DatasetPool(package_order=['opus_core'], storage=storage)
    test_agent_x_test_location = dataset_pool.get_dataset('test_agent_x_test_location')
    result = test_agent_x_test_location.compute_variables(expr, dataset_pool=dataset_pool)
    should_be = array([[2, 2], [40, 40], [1000, 1000]])
    self.assert_(ma.allclose(result, should_be, rtol=1e-6), msg="Error in " + expr)
    # test that the interaction set now has this as an attribute
    result2 = test_agent_x_test_location.get_attribute('income_times_2')
    self.assert_(ma.allclose(result2, should_be, rtol=1e-6), msg="Error in " + expr)
    # test that the variable can now also be accessed using its short name
    result3 = test_agent_x_test_location.compute_variables(['income_times_2'])
    self.assert_(ma.allclose(result3, should_be, rtol=1e-6), msg="Error in " + expr)
    # even though we're using this with an interaction set, the dataset name for expr
    # should be the name of the component set (since that's the only one mentioned in expr)
    name = VariableName(expr)
    self.assertEqual(name.get_dataset_name(), 'test_agent', msg="bad value for dataset")
Example 10: compute_m
def compute_m(self, year, quantity_of_interest):
    variable_name = VariableName(quantity_of_interest)
    dataset_name = variable_name.get_dataset_name()
    for i in range(self.number_of_runs):
        ds = self._compute_variable_for_one_run(i, variable_name, dataset_name, year, self.observed_data.get_quantity_object(quantity_of_interest))
        if i == 0:  # first run
            self.m = zeros((ds.size(), self.number_of_runs), dtype=float32)
            self.m_ids = ds.get_id_attribute()
        self.m[:, i] = try_transformation(ds.get_attribute(variable_name), self.transformation_pair_for_prediction[0])
Example 11: prepare_for_run
def prepare_for_run(self, expressions_to_compute=None, dataset_pool=None):
    if expressions_to_compute is not None:
        if dataset_pool is None:
            dataset_pool = SessionConfiguration().get_dataset_pool()
        for expression in expressions_to_compute:
            vn = VariableName(expression)
            dataset_name = vn.get_dataset_name()
            dataset = dataset_pool[dataset_name]
            dataset.compute_variables(expression)
Example 12: test_fully_qualified_variable
def test_fully_qualified_variable(self):
    # this tests an expression consisting of a fully-qualified variable
    expr = "opus_core.test_agent.income_times_2"
    storage = StorageFactory().get_storage("dict_storage")
    storage.write_table(table_name="test_agents", table_data={"income": array([1, 5, 10]), "id": array([1, 3, 4])})
    dataset = Dataset(in_storage=storage, in_table_name="test_agents", id_name="id", dataset_name="test_agent")
    result = dataset.compute_variables([expr])
    should_be = array([2, 10, 20])
    self.assert_(ma.allclose(result, should_be, rtol=1e-6), "Error in test_fully_qualified_variable")
    # check that expr is in the cache of known expressions
    # (normally we shouldn't be accessing this private field, but just this once ...)
    cache = VariableName._cache
    self.assert_(expr in cache, msg="did not find expr in cache")
    # check that the access methods for the variable all return the correct values
    name = VariableName(expr)
    self.assertEqual(name.get_package_name(), "opus_core", msg="bad value for package")
    self.assertEqual(name.get_dataset_name(), "test_agent", msg="bad value for dataset")
    self.assertEqual(name.get_short_name(), "income_times_2", msg="bad value for shortname")
    self.assertEqual(name.get_alias(), "income_times_2", msg="bad value for alias")
    self.assertEqual(name.get_autogen_class(), None, msg="bad value for autogen_class")
    # test that the variable can now also be accessed using its short name in an expression
    result2 = dataset.compute_variables(["income_times_2"])
    self.assert_(ma.allclose(result2, should_be, rtol=1e-6), "Error in accessing a_test_variable")
    # check that the cache uses the variable name with whitespace removed
    oldsize = len(cache)
    expr_with_spaces = "opus_core . test_agent. income_times_2 "
    name2 = VariableName(expr_with_spaces)
    newsize = len(cache)
    self.assertEqual(oldsize, newsize, msg="caching error")
    self.assert_(expr_with_spaces not in cache, msg="caching error")
    self.assertEqual(expr_with_spaces, name2.get_expression(), msg="caching error")
    self.assertEqual(name2.get_short_name(), "income_times_2", msg="bad value for shortname")
Example 13: create_and_check_qualified_variable_name
def create_and_check_qualified_variable_name(self, name):
    """Convert name to a VariableName if it isn't already, and add dataset_name to
    the VariableName if it is missing. If it already has a dataset_name, make sure
    it is the same as the name of this dataset.
    """
    if isinstance(name, VariableName):
        vname = name
    else:
        vname = VariableName(name)
    if vname.get_dataset_name() is None:
        vname.set_dataset_name(self.get_dataset_name())
    else:
        self._check_dataset_name(vname)
    return vname
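A brief usage sketch of the helper above, assuming (as the self calls suggest) that it lives on a Dataset-like class; dataset is meant to be the test_agent Dataset built in Example 12, and the attribute names are illustrative:

# A bare attribute gets the dataset name filled in from the dataset itself.
vname = dataset.create_and_check_qualified_variable_name("income")
vname.get_dataset_name()   # 'test_agent'

# An already-qualified name is instead checked against this dataset's name.
vname2 = dataset.create_and_check_qualified_variable_name("test_agent.income")
vname2.get_dataset_name()  # 'test_agent'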
Example 14: compute_values_from_multiple_runs
def compute_values_from_multiple_runs(self, year, quantity_of_interest, dtype='float32', dataset_arguments={}):
    """
    'quantity_of_interest' is a variable name in its fully-qualified form.
    Return a matrix of size (dataset.size x number_of_runs), with values of the variable
    for each dataset member and run. The dataset is the one to which
    quantity_of_interest belongs.
    """
    variable_name = VariableName(quantity_of_interest)
    dataset_name = variable_name.get_dataset_name()
    for i in range(self.cache_set.size):
        ds = self._compute_variable_for_one_run(i, variable_name, dataset_name, year, dataset_arguments=dataset_arguments)
        if i == 0:  # first run
            result = zeros((ds.size(), self.cache_set.size), dtype=dtype)
        result[:, i] = ds.get_attribute(variable_name)
    return result
Example 15: __init__
def __init__(self, variable_name, observed_data, filename=None, transformation=None, inverse_transformation=None,
             filter=None, match=False, dependent_datasets={}, **kwargs):
    """ 'variable_name' is a quantity about which we have data available.
    'observed_data' is of type ObservedData; it is the grouping parent.
    'filename' is the name of the file where the data is stored. It can be None
    if observed_data.directory is a cache.
    'transformation' is an operation to be performed on the data (e.g. sqrt, log).
    'inverse_transformation' is the inverse function of 'transformation'. If it is not given, it
    is determined automatically.
    'filter' is a variable that will be applied to both the observed data and the simulated data.
    'match' (logical) determines if the dataset should be matched (by ids) with the simulated dataset. Elements
    that don't match are eliminated from the simulated dataset.
    'dependent_datasets' (if any) should be a dictionary of dataset_name: {'filename': filename, 'match': True|False, **kwargs}.
    They will be added to the dataset_pool.
    Remaining arguments are passed into DatasetFactory, so they can contain information about how
    to create the corresponding dataset.
    """
    self.variable_name = VariableName(variable_name)
    self.dataset_name = self.variable_name.get_dataset_name()
    dataset_pool = observed_data.get_dataset_pool()
    self.matching_datasets = {}
    if dataset_pool is None:
        kwargs.update({'in_storage': observed_data.get_storage(), 'in_table_name': filename})
        try:
            self.dataset = DatasetFactory().search_for_dataset(self.dataset_name, observed_data.get_package_order(), arguments=kwargs)
        except:  # take generic dataset
            self.dataset = Dataset(dataset_name=self.dataset_name, **kwargs)
    else:
        self.dataset = dataset_pool.get_dataset(self.dataset_name)
    if match:
        self.add_match(self.dataset)
    for dep_dataset_name, info in dependent_datasets.iteritems():
        if dataset_pool is None:
            dataset_pool = DatasetPool(storage=observed_data.get_storage(), package_order=observed_data.get_package_order())
        info.update({'in_storage': observed_data.get_storage(), 'in_table_name': info.get('filename')})
        del info['filename']
        match = False
        if 'match' in info.keys():
            match = info['match']
            del info['match']
        try:
            dep_dataset = DatasetFactory().search_for_dataset(dep_dataset_name, observed_data.get_package_order(), arguments=info)
        except:
            dep_dataset = Dataset(dataset_name=dep_dataset_name, **info)
        dataset_pool.replace_dataset(dep_dataset_name, dep_dataset)
        if match:
            self.add_match(dep_dataset)
    if self.variable_name.get_alias() not in self.dataset.get_known_attribute_names():
        self.dataset.compute_variables([self.variable_name], dataset_pool=dataset_pool)
    if filter is not None:
        filter_values = self.dataset.compute_variables([filter], dataset_pool=dataset_pool)
        idx = where(filter_values > 0)[0]
        self.add_match(self.dataset, idx)
        self.dataset.subset_by_index(idx)
    self.transformation = transformation
    self.inverse_transformation = inverse_transformation
    if (self.transformation is not None) and (self.inverse_transformation is None):
        self.inverse_transformation = self.transformation_pairs[self.transformation]