This article collects typical usage examples of the Python function pylearn2.utils.safe_union. If you have been wondering what exactly safe_union does, how to call it, or where it is used, the curated code examples below may help.
A total of 7 code examples of the safe_union function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
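For context before the examples: safe_union computes an order-preserving union of two lists, which matters when the elements are Theano shared variables whose ordering must stay deterministic. Below is a minimal sketch of its behavior, consistent with the examples that follow (the actual pylearn2.utils implementation may differ in details):

def safe_union(a, b):
    """Order-preserving union of two lists, avoiding Python-set nondeterminism."""
    if not isinstance(a, list):
        raise TypeError("Expected a list, got " + str(type(a)))
    if not isinstance(b, list):
        raise TypeError("Expected a list, got " + str(type(b)))
    # keep all of a, then append only those elements of b not already in a
    return a + [x for x in b if x not in a]

print(safe_union([1, 2], [2, 3]))  # [1, 2, 3]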
Example 1: merge
def merge(left, right):
    """
    Combine two FixedVarDescrs.

    Parameters
    ----------
    left : FixedVarDescr
    right : FixedVarDescr

    Returns
    -------
    merged : FixedVarDescr
        A new FixedVarDescr describing all variables and operations
        described by `left` and `right`.
    """
    # We assume aliasing is a bug
    assert left is not right
    assert left.fixed_vars is not right.fixed_vars
    assert left.on_load_batch is not right.on_load_batch

    merged = FixedVarDescr()
    for key in left.fixed_vars:
        if key in right.fixed_vars:
            raise ValueError("Can't merge these FixedVarDescrs, "
                             "both contain " + key)
    assert not any(key in left.fixed_vars for key in right.fixed_vars)
    merged.fixed_vars.update(left.fixed_vars)
    merged.fixed_vars.update(right.fixed_vars)
    merged.on_load_batch = safe_union(left.on_load_batch,
                                      right.on_load_batch)
    return merged
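A hypothetical usage sketch of this merge: the keys, values, and callbacks below are invented for illustration, and FixedVarDescr is assumed to be importable from pylearn2.costs.cost:

from pylearn2.costs.cost import FixedVarDescr  # assumed location

# Invented contents for illustration only.
left = FixedVarDescr()
left.fixed_vars = {'dropout_mask': 0}        # placeholder value
left.on_load_batch = [lambda *data: None]    # placeholder callback

right = FixedVarDescr()
right.fixed_vars = {'noise_level': 1}
right.on_load_batch = [lambda *data: None]

merged = merge(left, right)
assert sorted(merged.fixed_vars) == ['dropout_mask', 'noise_level']
assert len(merged.on_load_batch) == 2  # safe_union of both callback lists
# If both descriptors contained the same key, merge would raise ValueError.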
Example 2: merge
def merge(left, right):
    """
    Combine two FixedVarDescrs
    """
    assert left is not right
    # We assume aliasing is a bug
    assert left.fixed_vars is not right.fixed_vars
    assert left.on_load_batch is not right.on_load_batch

    rval = FixedVarDescr()
    for key in left.fixed_vars:
        if key in right.fixed_vars:
            raise ValueError("Can't merge these FixedVarDescrs, "
                             "both contain " + key)
    assert not any(key in left.fixed_vars for key in right.fixed_vars)
    rval.fixed_vars.update(left.fixed_vars)
    rval.fixed_vars.update(right.fixed_vars)
    rval.on_load_batch = safe_union(left.on_load_batch, right.on_load_batch)
    return rval

def __call__(self, X, Y):
    return self.wrapped(X)
Example 3: merge
def merge(left, right):
    """
    .. todo::

        WRITEME properly

    Combine two FixedVarDescrs
    """
    assert left is not right
    # We assume aliasing is a bug
    assert left.fixed_vars is not right.fixed_vars
    assert left.on_load_batch is not right.on_load_batch

    rval = FixedVarDescr()
    for key in left.fixed_vars:
        if key in right.fixed_vars:
            raise ValueError("Can't merge these FixedVarDescrs, "
                             "both contain " + key)
    assert not any(key in left.fixed_vars for key in right.fixed_vars)
    rval.fixed_vars.update(left.fixed_vars)
    rval.fixed_vars.update(right.fixed_vars)

    if left.data_specs == right.data_specs:
        # Combining the on_load_batch functions is easy, as they take
        # the same input arguments
        rval.data_specs = left.data_specs  # not left.fixed_vars, which would be the wrong object
        rval.on_load_batch = safe_union(left.on_load_batch,
                                        right.on_load_batch)
    else:
        # We would have to build a composite data_specs
        raise NotImplementedError()
    return rval
Example 4: __init__
def __init__(self):
    # Two lists of weight chunks, each chunk a shared variable
    self.W1 = [sharedX(rng.randn(num_features, chunk_width))
               for i in xrange(num_chunks)]
    disturb_mem.disturb_mem()
    self.W2 = [sharedX(rng.randn(chunk_width)) for i in xrange(num_chunks)]
    # Collect all chunks into a single deterministic parameter list
    self._params = safe_union(self.W1, self.W2)
    self.input_space = VectorSpace(num_features)
    self.output_space = VectorSpace(1)
Example 5: __init__
def __init__(self, input_space, output_channels, pool_shape, batch_size=None,
             detector_axes=('b', 'c', 0, 1), kernel_shape=(2, 2),
             kernel_stride=(1, 1), border_mode='valid', transformer=None,
             h_bias=None, v_bias=None, numpy_rng=None, theano_rng=None):
    """
    input_space: Conv2DSpace
    transformer: pylearn2.linear.Conv2D instance
    h_bias: vector whose size equals the number of output feature maps;
        each component corresponds to one feature map
    v_bias: vector whose size equals the number of input feature maps;
        each component corresponds to one feature map
    pool_shape:
    pool_stride: following Honglak Lee's original paper, pooling regions
        do not overlap, so pool_stride must equal pool_shape; for now no
        separate pool_stride parameter is exposed.

    Note that in a convolutional RBM the hidden layer corresponds to the
    post-convolution detector layer, while the output corresponds to the
    pooling layer. So unlike an ordinary RBM, which has only an input and
    an output space, a CRBM has three spaces.
    """
    Model.__init__(self)  # self.names_to_del = set(); self._test_batch_size = 2
    Block.__init__(self)  # self.fn = None; self.cpu_only = False

    self.kernel_shape = kernel_shape
    self.kernel_stride = kernel_stride
    self.pool_shape = pool_shape
    self.pool_stride = pool_shape
    self.border_mode = border_mode
    self.batch_size = batch_size
    self.force_batch_size = batch_size

    assert isinstance(input_space, Conv2DSpace)
    input_shape = input_space.shape
    input_channels = input_space.num_channels
    if self.border_mode == 'valid':
        detector_shape = [
            (input_shape[0] - kernel_shape[0]) // kernel_stride[0] + 1,
            (input_shape[1] - kernel_shape[1]) // kernel_stride[1] + 1]
    elif self.border_mode == 'full':
        detector_shape = [
            (input_shape[0] + kernel_shape[0]) // kernel_stride[0] - 1,
            (input_shape[1] + kernel_shape[1]) // kernel_stride[1] - 1]
    else:
        raise ValueError("Unknown border_mode: " + str(border_mode))

    self.input_space = input_space  # add input_space
    self.detector_space = Conv2DSpace(shape=detector_shape,
                                      num_channels=output_channels,
                                      axes=detector_axes)  # add detector_space

    # For now we only handle the case where the detector layer's feature
    # maps divide evenly into non-overlapping pool_shape regions.
    # TODO: support padding at the edges.
    output_shape = (detector_shape[0] // pool_shape[0],
                    detector_shape[1] // pool_shape[1])
    self.output_space = Conv2DSpace(shape=output_shape,
                                    num_channels=output_channels,
                                    axes=detector_axes)  # add output_space

    self.n_vis = numpy.prod(input_space.shape) * input_space.num_channels
    self.n_hid = detector_shape[0] * detector_shape[1] * output_channels

    if numpy_rng is None:
        # create a number generator
        numpy_rng = numpy.random.RandomState(seed=19900418)
    self.numpy_rng = numpy_rng
    if theano_rng is None:
        theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
    self.theano_rng = theano_rng

    if transformer is None:
        irange = 4 * numpy.sqrt(6. / (self.n_hid + self.n_vis))
        transformer = make_random_conv2D(
            irange=irange, input_space=self.input_space,
            output_space=self.detector_space, kernel_shape=self.kernel_shape,
            batch_size=self.batch_size, subsample=kernel_stride,
            border_mode=self.border_mode, rng=self.numpy_rng)
    else:
        assert isinstance(transformer, Conv2D)
    if h_bias is None:
        # create a shared variable for the hidden units' bias
        h_bias = theano.shared(
            value=numpy.zeros(self.detector_space.num_channels,
                              dtype=theano.config.floatX),
            name='h_bias', borrow=True)
    if v_bias is None:
        # create a shared variable for the visible units' bias
        v_bias = theano.shared(
            value=numpy.zeros(self.input_space.num_channels,
                              dtype=theano.config.floatX),
            name='v_bias', borrow=True)
    self.transformer = transformer
    self.h_bias = h_bias
    self.v_bias = v_bias
    self._params = safe_union(self.transformer.get_params(),
                              [self.h_bias, self.v_bias])
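To make the three spaces concrete, here is the shape arithmetic the constructor performs for border_mode='valid', with invented example numbers:

# Illustrative shape arithmetic for the CRBM above (numbers invented).
input_shape = (32, 32)
kernel_shape = (5, 5)
kernel_stride = (1, 1)
pool_shape = (2, 2)

detector_shape = [(input_shape[i] - kernel_shape[i]) // kernel_stride[i] + 1
                  for i in (0, 1)]
output_shape = [detector_shape[i] // pool_shape[i] for i in (0, 1)]

print(detector_shape)  # [28, 28] -- hidden/detector layer
print(output_shape)    # [14, 14] -- pooling/output layer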
Example 6: __init__
#......... part of the code omitted here .........
    Parameters for default SML learning rule:

    base_lr : the base learning rate
    anneal_start : number of steps after which to start annealing on a 1/t schedule
    nchains : number of negative chains
    sml_gibbs_steps : number of Gibbs steps to take per update
    """
    Model.__init__(self)
    Block.__init__(self)

    if init_bias_vis_marginals is not None:
        assert init_bias_vis is None
        X = init_bias_vis_marginals.X
        assert X.min() >= 0.0
        assert X.max() <= 1.0
        marginals = X.mean(axis=0)
        # rescale the marginals a bit to avoid NaNs
        init_bias_vis = inverse_sigmoid_numpy(.01 + .98 * marginals)
    if init_bias_vis is None:
        init_bias_vis = 0.0

    if rng is None:
        # TODO: global rng configuration stuff.
        rng = numpy.random.RandomState(1001)
    self.rng = rng

    if vis_space is None:
        # if we don't specify things in terms of spaces and a transformer,
        # assume dense matrix multiplication and work off of nvis, nhid
        assert hid_space is None
        assert transformer is None or isinstance(transformer, MatrixMul)
        assert nvis is not None
        assert nhid is not None

        if transformer is None:
            if random_patches_src is None:
                W = rng.uniform(-irange, irange, (nvis, nhid))
            else:
                if hasattr(random_patches_src, '__array__'):
                    W = irange * random_patches_src.T
                    assert W.shape == (nvis, nhid)
                else:
                    # assert type(irange) == type(0.01)
                    # assert irange == 0.01
                    W = irange * random_patches_src.get_batch_design(nhid).T
            self.transformer = MatrixMul(sharedX(W, name='W', borrow=True))
        else:
            self.transformer = transformer

        self.vis_space = VectorSpace(nvis)
        self.hid_space = VectorSpace(nhid)
    else:
        assert hid_space is not None
        assert transformer is not None
        assert nvis is None
        assert nhid is None
        self.vis_space = vis_space
        self.hid_space = hid_space
        self.transformer = transformer

    try:
        b_vis = self.vis_space.get_origin()
        b_vis += init_bias_vis
    except ValueError:
        raise ValueError("bad shape or value for init_bias_vis")
    self.bias_vis = sharedX(b_vis, name='bias_vis', borrow=True)

    try:
        b_hid = self.hid_space.get_origin()
        b_hid += init_bias_hid
    except ValueError:
        raise ValueError('bad shape or value for init_bias_hid')
    self.bias_hid = sharedX(b_hid, name='bias_hid', borrow=True)

    self.random_patches_src = random_patches_src
    self.register_names_to_del(['random_patches_src'])

    self.__dict__.update(nhid=nhid, nvis=nvis)
    self._params = safe_union(self.transformer.get_params(),
                              [self.bias_vis, self.bias_hid])

    self.base_lr = base_lr
    self.anneal_start = anneal_start
    self.nchains = nchains
    self.sml_gibbs_steps = sml_gibbs_steps
Example 7: get_params
def get_params(self):
    rval = []
    for layer in self.layers:
        rval = safe_union(layer.get_params(), rval)
    return rval
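Unlike plain list concatenation, this keeps a parameter that is shared between layers (for example, tied weights) only once. A self-contained illustration, using invented stub layers and the safe_union sketch from the top of this article:

# Hypothetical stand-ins for pylearn2 layers; only get_params() matters here.
class StubLayer(object):
    def __init__(self, params):
        self._params = params

    def get_params(self):
        return list(self._params)

shared_w = 'W_tied'  # placeholder for a weight shared between layers
layer1 = StubLayer([shared_w, 'b1'])
layer2 = StubLayer([shared_w, 'b2'])  # reuses the tied weight

params = []
for layer in (layer1, layer2):
    params = safe_union(layer.get_params(), params)

# The tied weight appears only once in the accumulated parameter list.
assert params.count('W_tied') == 1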