This article collects typical usage examples of the load_path function from pylearn2.config.yaml_parse in Python. If you are wondering what load_path does, how to call it, or what it looks like in real code, the curated examples below should help.
Fifteen code examples of load_path are shown below, ordered roughly by popularity.
Example 1: load_train_file
def load_train_file(config_file_path, environ=None):
    """
    Loads and parses a yaml file for a Train object.
    Publishes the relevant training environment variables

    Parameters
    ----------
    config_file_path : WRITEME

    Returns
    -------
    WRITEME
    """
    from pylearn2.config import yaml_parse
    suffix_to_strip = '.yaml'

    # publish environment variables related to file name
    if config_file_path.endswith(suffix_to_strip):
        config_file_full_stem = config_file_path[0:-len(suffix_to_strip)]
    else:
        config_file_full_stem = config_file_path

    for varname in ["PYLEARN2_TRAIN_FILE_FULL_STEM"]:
        os.environ[varname] = config_file_full_stem

    directory = config_file_path.split('/')[:-1]
    directory = '/'.join(directory)
    if directory != '':
        directory += '/'
    os.environ["PYLEARN2_TRAIN_DIR"] = directory
    os.environ["PYLEARN2_TRAIN_BASE_NAME"] = config_file_path.split('/')[-1]
    os.environ["PYLEARN2_TRAIN_FILE_STEM"] = config_file_full_stem.split('/')[-1]

    return yaml_parse.load_path(config_file_path, environ=environ)
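A minimal usage sketch for the helper above. The path experiments/experiment.yaml is hypothetical and is assumed to describe a pylearn2.train.Train object; os is assumed to be imported as in the function body.

import os

# Load the Train object and publish the PYLEARN2_TRAIN_* variables.
train = load_train_file('experiments/experiment.yaml')

# Environment variables published by the helper for this path:
print(os.environ['PYLEARN2_TRAIN_DIR'])        # 'experiments/'
print(os.environ['PYLEARN2_TRAIN_BASE_NAME'])  # 'experiment.yaml'
print(os.environ['PYLEARN2_TRAIN_FILE_STEM'])  # 'experiment'

train.main_loop()  # a Train object exposes main_loop(), as in Example 2 below

Since these are ordinary environment variables, the YAML file can refer back to them through ${...} substitution (see the environ parameter documented in Examples 4 and 9).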
Example 2: test_IS_cost
def test_IS_cost():
    """
    VAE trains properly with the importance sampling cost
    """
    yaml_src_path = os.path.join(os.path.dirname(__file__),
                                 "test_vae_cost_is_criterion.yaml")
    train_object = yaml_parse.load_path(yaml_src_path)
    train_object.main_loop()
Example 3: test_load_from_yaml
def test_load_from_yaml(self):
    """
    Load dataset from a YAML file.
    """
    imdset = yaml_parse.load_path(self.yaml_file)
    imdset = imdset['dataset']
    self.assertEqual(len(imdset.adjusters), 6)
Example 4: load_path
def load_path(path, environ=None, **kwargs):
    """
    Convenience function for loading a YAML configuration from a file
    into a `PartialPlus` graph.

    Parameters
    ----------
    path : str
        The path to the file to load on disk.
    environ : dict, optional
        A dictionary used for ${FOO} substitutions in addition to
        environment variables. If a key appears both in `os.environ`
        and this dictionary, the value in this dictionary is used.

    Returns
    -------
    graph : Node
        A `PartialPlus` or `Literal` node representing the root
        node of the YAML hierarchy.

    Notes
    -----
    Other keyword arguments are passed on to `yaml.load`.
    """
    return proxy_to_partialplus(yaml_parse.load_path(path, instantiate=False,
                                                     **kwargs),
                                environ=environ)
Example 5: test_load_path
def test_load_path():
    fd, fname = tempfile.mkstemp()
    with os.fdopen(fd, 'wb') as f:
        f.write("a: 23")
    loaded = load_path(fname)
    assert_(loaded['a'] == 23)
    os.remove(fname)
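Along the same lines, a slightly richer sketch: the file content is hypothetical, and the same names as in the test above (os, tempfile, load_path) are assumed to be imported. Plain YAML with no !obj: tags comes back as ordinary dicts and lists.

import os
import tempfile

# Write a small nested YAML mapping to a temporary file.
fd, fname = tempfile.mkstemp(suffix='.yaml')
with os.fdopen(fd, 'w') as f:
    f.write("model:\n"
            "  n_hidden: 100\n"
            "  layers: [tanh, softmax]\n")

# load_path parses the file and returns plain Python containers.
loaded = load_path(fname)
assert loaded['model']['n_hidden'] == 100
assert loaded['model']['layers'] == ['tanh', 'softmax']
os.remove(fname)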
Example 6: yaml_file_execution
def yaml_file_execution(file_path):
    try:
        train = yaml_parse.load_path(file_path)
        train.algorithm.termination_criterion = EpochCounter(max_epochs=2)
        train.main_loop()
    except NoDataPathError:
        raise SkipTest("PYLEARN2_DATA_PATH environment variable not defined")
Example 7: load_train_file
def load_train_file(config_file_path):
    """Loads and parses a yaml file for a Train object.
    Publishes the relevant training environment variables"""
    from pylearn2.config import yaml_parse
    suffix_to_strip = '.yaml'

    # publish environment variables related to file name
    if config_file_path.endswith(suffix_to_strip):
        config_file_full_stem = config_file_path[0:-len(suffix_to_strip)]
    else:
        config_file_full_stem = config_file_path

    for varname in ["PYLEARN2_TRAIN_FILE_NAME",       # this one is deprecated
                    "PYLEARN2_TRAIN_FILE_FULL_STEM"]:  # this is the new, accepted name
        environ.putenv(varname, config_file_full_stem)

    directory = config_file_path.split('/')[:-1]
    directory = '/'.join(directory)
    if directory != '':
        directory += '/'
    environ.putenv("PYLEARN2_TRAIN_DIR", directory)
    environ.putenv("PYLEARN2_TRAIN_BASE_NAME", config_file_path.split('/')[-1])
    environ.putenv("PYLEARN2_TRAIN_FILE_STEM", config_file_full_stem.split('/')[-1])

    return yaml_parse.load_path(config_file_path)
Example 8: construct_model
def construct_model(self):
    filedir = os.path.join(os.path.dirname(__file__), 'mlps.yaml')
    layer_args = yaml_parse.load_path(filedir)[self.modelname]
    layers = []
    # adapt in case of 2d layer
    if (self.conv_class == ConvElemwise):
        self.adapt_for_2d_conv(layer_args)
    else:
        self.adapt_for_time_dim(layer_args)
    print layer_args
    for i, layer_arg in enumerate(layer_args):
        layer = self.construct_layer(layer_arg, i)
        layers.append(layer)
    input_space = self.create_input_space()
    mlp = MLP(input_space=input_space, layers=layers)
    return mlp
Example 9: load_train_file
def load_train_file(config_file_path, environ=None):
    """
    Loads and parses a yaml file for a Train object.
    Publishes the relevant training environment variables

    Parameters
    ----------
    config_file_path : str
        Path to a config file containing a YAML string describing a
        pylearn2.train.Train object
    environ : dict, optional
        A dictionary used for ${FOO} substitutions in addition to
        environment variables when parsing the YAML file. If a key appears
        both in `os.environ` and this dictionary, the value in this
        dictionary is used.

    Returns
    -------
    Object described by the YAML string stored in the config file
    """
    from pylearn2.config import yaml_parse
    suffix_to_strip = '.yaml'

    # Publish environment variables related to file name
    if config_file_path.endswith(suffix_to_strip):
        config_file_full_stem = config_file_path[0:-len(suffix_to_strip)]
    else:
        config_file_full_stem = config_file_path

    os.environ["PYLEARN2_TRAIN_FILE_FULL_STEM"] = config_file_full_stem

    directory = config_file_path.split('/')[:-1]
    directory = '/'.join(directory)
    if directory != '':
        directory += '/'
    os.environ["PYLEARN2_TRAIN_DIR"] = directory
    os.environ["PYLEARN2_TRAIN_BASE_NAME"] = config_file_path.split('/')[-1]
    os.environ["PYLEARN2_TRAIN_FILE_STEM"] = config_file_full_stem.split('/')[-1]

    return yaml_parse.load_path(config_file_path, environ=environ)
Example 10: load_yaml
def load_yaml(self, fname):
    """
    Slot that loads a YAML file.
    """
    if not fname:
        return
    try:
        # publish environment variables relevant to this file
        serial.prepare_train_file(fname)
        # load the tree of Proxy objects
        environ = {}
        yaml_tree = yaml_parse.load_path(fname,
                                         instantiate=False,
                                         environ=environ)
        yaml_tree = yaml_parse._instantiate(yaml_tree)
        self.show_object_tree(yaml_tree)
    except Exception, exc:
        logger.error('Loading yaml file failed', exc_info=True)
        QtGui.QMessageBox.warning(self, 'Exception', str(exc))
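The same parse-then-instantiate pattern can be used outside the GUI. A minimal sketch with a hypothetical path, relying on the private helper yaml_parse._instantiate exactly as the slot above does:

from pylearn2.config import yaml_parse

# Phase 1: parse the YAML into a tree of Proxy objects without building them.
proxy_tree = yaml_parse.load_path('experiment.yaml', instantiate=False)

# ... the proxy tree can be inspected or modified here ...

# Phase 2: build the actual objects described by the YAML.
train_object = yaml_parse._instantiate(proxy_tree)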
Example 11: main
def main(options, positional_args):
    """
    .. todo::

        WRITEME
    """
    assert len(positional_args) == 1
    path, = positional_args

    out = options.out
    rescale = options.rescale

    if rescale == 'none':
        global_rescale = False
        patch_rescale = False
    elif rescale == 'global':
        global_rescale = True
        patch_rescale = False
    elif rescale == 'individual':
        global_rescale = False
        patch_rescale = True
    else:
        assert False

    if path.endswith('.pkl'):
        from pylearn2.utils import serial
        obj = serial.load(path)
    elif path.endswith('.yaml'):
        print 'Building dataset from yaml...'
        obj = yaml_parse.load_path(path)
        print '...done'
    else:
        obj = yaml_parse.load(path)

    rows = options.rows
    cols = options.cols

    if hasattr(obj, 'get_batch_topo'):
        # obj is a Dataset
        dataset = obj
        examples = dataset.get_batch_topo(rows * cols)
    else:
        # obj is a Model
        model = obj
        from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
        theano_rng = RandomStreams(42)
        design_examples_var = model.random_design_matrix(batch_size=rows * cols,
                                                         theano_rng=theano_rng)
        from theano import function
        print 'compiling sampling function'
        f = function([], design_examples_var)
        print 'sampling'
        design_examples = f()
        print 'loading dataset'
        dataset = yaml_parse.load(model.dataset_yaml_src)
        examples = dataset.get_topological_view(design_examples)

    norms = N.asarray([
        N.sqrt(N.sum(N.square(examples[i, :])))
        for i in xrange(examples.shape[0])
    ])
    print 'norms of examples: '
    print '\tmin: ', norms.min()
    print '\tmean: ', norms.mean()
    print '\tmax: ', norms.max()

    print 'range of elements of examples', (examples.min(), examples.max())
    print 'dtype: ', examples.dtype

    examples = dataset.adjust_for_viewer(examples)

    if global_rescale:
        examples /= N.abs(examples).max()

    if len(examples.shape) != 4:
        print 'sorry, view_examples.py only supports image examples ' + \
            'for now.'
        print 'this dataset has ' + \
            str(len(examples.shape) - 2) + ' topological dimensions'
        quit(-1)

    if examples.shape[3] == 1:
        is_color = False
    elif examples.shape[3] == 3:
        is_color = True
    else:
        print 'got unknown image format with ' + str(examples.shape[3]) + \
            ' channels'
        print 'supported formats are 1 channel greyscale or three channel RGB'
        quit(-1)

    print examples.shape[1:3]

    pv = patch_viewer.PatchViewer((rows, cols), examples.shape[1:3],
                                  is_color=is_color)

    for i in xrange(rows * cols):
        # ... (rest of this example omitted) ...
Example 12: main
def main(options, positional_args):
    assert len(positional_args) == 1
    path, = positional_args

    out = options.out
    rescale = options.rescale

    if rescale == "none":
        global_rescale = False
        patch_rescale = False
    elif rescale == "global":
        global_rescale = True
        patch_rescale = False
    elif rescale == "individual":
        global_rescale = False
        patch_rescale = True
    else:
        assert False

    if path.endswith(".pkl"):
        from pylearn2.utils import serial
        obj = serial.load(path)
    elif path.endswith(".yaml"):
        print "Building dataset from yaml..."
        obj = yaml_parse.load_path(path)
        print "...done"
    else:
        obj = yaml_parse.load(path)

    rows = options.rows
    cols = options.cols

    if hasattr(obj, "get_batch_topo"):
        # obj is a Dataset
        dataset = obj
        examples = dataset.get_batch_topo(rows * cols)
    else:
        # obj is a Model
        model = obj
        from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
        theano_rng = RandomStreams(42)
        design_examples_var = model.random_design_matrix(batch_size=rows * cols,
                                                         theano_rng=theano_rng)
        from theano import function
        print "compiling sampling function"
        f = function([], design_examples_var)
        print "sampling"
        design_examples = f()
        print "loading dataset"
        dataset = yaml_parse.load(model.dataset_yaml_src)
        examples = dataset.get_topological_view(design_examples)

    norms = N.asarray([N.sqrt(N.sum(N.square(examples[i, :])))
                       for i in xrange(examples.shape[0])])
    print "norms of examples: "
    print "\tmin: ", norms.min()
    print "\tmean: ", norms.mean()
    print "\tmax: ", norms.max()

    print "range of elements of examples", (examples.min(), examples.max())
    print "dtype: ", examples.dtype

    examples = dataset.adjust_for_viewer(examples)

    if global_rescale:
        examples /= N.abs(examples).max()

    if len(examples.shape) != 4:
        print "sorry, view_examples.py only supports image examples for now."
        print "this dataset has " + str(len(examples.shape) - 2) + " topological dimensions"
        quit(-1)

    if examples.shape[3] == 1:
        is_color = False
    elif examples.shape[3] == 3:
        is_color = True
    else:
        print "got unknown image format with " + str(examples.shape[3]) + " channels"
        print "supported formats are 1 channel greyscale or three channel RGB"
        quit(-1)

    print examples.shape[1:3]

    pv = patch_viewer.PatchViewer((rows, cols), examples.shape[1:3], is_color=is_color)

    for i in xrange(rows * cols):
        pv.add_patch(examples[i, :, :, :], activation=0.0, rescale=patch_rescale)

    if out is None:
        pv.show()
    else:
        pv.save(out)
Example 13: __init__
def __init__(self,
             path='train.csv',
             task='classification',
             expect_labels=True,
             expect_headers=True,
             delimiter=',',
             start=None,
             stop=None,
             start_fraction=None,
             end_fraction=None,
             yaml_src=None,
             one_hot=True,
             num_classes=4,
             which_set=None):
    """
    .. todo::

        WRITEME
    """
    self.path = path
    self.task = task
    self.expect_labels = expect_labels
    self.expect_headers = expect_headers
    self.delimiter = delimiter
    if which_set is not None:
        self.start = start
        self.stop = stop
    self.start_fraction = start_fraction
    self.end_fraction = end_fraction
    self.view_converter = None

    if yaml_src is not None:
        self.yaml_src = yaml_parse.load_path(yaml_src)
        # self.yaml_src = yaml_parse.load_path("mlp.yaml")
        # eventually; triple-quoted yaml...

    self.one_hot = one_hot
    self.num_classes = num_classes

    if which_set is not None and which_set not in [
            'train', 'test', 'valid']:
        raise ValueError(
            'Unrecognized which_set value "%s".' % (which_set,) +
            '". Valid values are ["train","test","valid"].')
    else:
        self.which_set = which_set
        if self.start is not None or self.stop is not None:
            raise ValueError("Use start/stop or which_set,"
                             " just not together.")

    if task not in ['classification', 'regression']:
        raise ValueError('task must be either "classification" or '
                         '"regression"; got ' + str(task))

    if start_fraction is not None:
        if end_fraction is not None:
            raise ValueError("Use start_fraction or end_fraction, "
                             " not both.")
        if start_fraction <= 0:
            raise ValueError("start_fraction should be > 0")
        if start_fraction >= 1:
            raise ValueError("start_fraction should be < 1")

    if end_fraction is not None:
        if end_fraction <= 0:
            raise ValueError("end_fraction should be > 0")
        if end_fraction >= 1:
            raise ValueError("end_fraction should be < 1")

    if start is not None:
        if start_fraction is not None or end_fraction is not None:
            raise ValueError("Use start, start_fraction, or end_fraction,"
                             " just not together.")

    if stop is not None:
        if start_fraction is not None or end_fraction is not None:
            raise ValueError("Use stop, start_fraction, or end_fraction,"
                             " just not together.")

    # and go
    self.path = preprocess(self.path)
    X, y = self._load_data()
    # y = y.astype(int)
    # y = map(int, np.rint(y).astype(int))

    if self.task == 'regression':
        super(CSVDatasetPlus, self).__init__(X=X, y=y)
    else:
        # , y_labels=4  # y_labels=np.max(y)+1
        super(CSVDatasetPlus, self).__init__(
            X=X, y=y.astype(int), y_labels=self.num_classes)
Example 14: hasattr
    patch_rescale = False
elif rescale == 'global':
    global_rescale = True
    patch_rescale = False
elif rescale == 'individual':
    global_rescale = False
    patch_rescale = True
else:
    assert False

if path.endswith('.pkl'):
    from pylearn2.utils import serial
    obj = serial.load(path)
elif path.endswith('.yaml'):
    print 'Building dataset from yaml...'
    obj = yaml_parse.load_path(path)
    print '...done'
else:
    obj = yaml_parse.load(path)

rows = options.rows
cols = options.cols

if hasattr(obj, 'get_batch_topo'):
    # obj is a Dataset
    dataset = obj
    examples = dataset.get_batch_topo(rows * cols)
else:
    # obj is a Model
    model = obj
Example 15:
__author__ = "Ian Goodfellow"
from pylearn2.config import yaml_parse
import sys
_, path = sys.argv
simulator = yaml_parse.load_path(path)
simulator.main_loop()