本文整理匯總了Python中lasagne.init.Constant方法的典型用法代碼示例。如果您正苦於以下問題:Python init.Constant方法的具體用法?Python init.Constant怎麽用?Python init.Constant使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類lasagne.init
的用法示例。
在下文中一共展示了init.Constant方法的13個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於系統推薦出更棒的Python代碼示例。
示例1: __init__
# 需要導入模塊: from lasagne import init [as 別名]
# 或者: from lasagne.init import Constant [as 別名]
def __init__(self, incoming, num_styles=None, epsilon=1e-4,
             beta=Constant(0), gamma=Constant(1), **kwargs):
    """Instance-normalization layer over the spatial axes (2, 3).

    Parameters
    ----------
    incoming : Layer
        The layer feeding into this layer.
    num_styles : int or None
        If given, keep a separate beta/gamma row per style, giving
        parameters of shape (num_styles, num_channels); otherwise a
        single set of shape (num_channels,).
    epsilon : float
        Small constant for numerical stability.
    beta, gamma : initializer or None
        Initializers for the shift/scale parameters; None disables the
        corresponding parameter entirely.
    """
    super(InstanceNormLayer, self).__init__(incoming, **kwargs)
    # Normalize over the spatial dimensions of an NCHW input.
    self.axes = (2, 3)
    self.epsilon = epsilon
    # FIX: compare with `is None` (identity), not `== None` (PEP 8).
    if num_styles is None:
        shape = (self.input_shape[1],)
    else:
        shape = (num_styles, self.input_shape[1])
    if beta is None:
        self.beta = None
    else:
        self.beta = self.add_param(beta, shape, 'beta',
                                   trainable=True, regularizable=False)
    if gamma is None:
        self.gamma = None
    else:
        # NOTE: gamma is intentionally regularizable here, unlike beta.
        self.gamma = self.add_param(gamma, shape, 'gamma',
                                    trainable=True, regularizable=True)
示例2: __init__
# 需要導入模塊: from lasagne import init [as 別名]
# 或者: from lasagne.init import Constant [as 別名]
def __init__(self, incoming, num_filters, filter_size, stride=(1, 1),
             pad=0, untie_biases=False, groups=1,
             W=init.Uniform(), b=init.Constant(0.),
             nonlinearity=nl.rectify, flip_filters=True,
             convolution=T.nnet.conv2d, filter_dilation=(1, 1), **kwargs):
    """Grouped 2D convolution layer.

    Splits the filters into `groups` independent groups (as in AlexNet /
    depthwise-style convolutions); all other parameters are forwarded to
    the standard Conv2DLayer base class.

    Raises
    ------
    ValueError
        If `num_filters` is not divisible by `groups`.
    """
    # FIX: validate with an explicit exception instead of `assert`,
    # which is silently stripped when Python runs with -O.
    if num_filters % groups != 0:
        raise ValueError(
            "num_filters (%d) must be divisible by groups (%d)"
            % (num_filters, groups))
    self.groups = groups
    super(GroupConv2DLayer, self).__init__(incoming, num_filters, filter_size,
                                           stride=stride, pad=pad,
                                           untie_biases=untie_biases,
                                           W=W, b=b,
                                           nonlinearity=nonlinearity,
                                           flip_filters=flip_filters,
                                           convolution=convolution,
                                           filter_dilation=filter_dilation,
                                           **kwargs)
示例3: __init__
# 需要導入模塊: from lasagne import init [as 別名]
# 或者: from lasagne.init import Constant [as 別名]
def __init__(self, incoming, num_labels, mask_input=None, W=init.GlorotUniform(), b=init.Constant(0.), **kwargs):
    """CRF layer with pairwise label-transition parameters.

    Inherits from a MergeLayer so it can take an optional mask input
    alongside the data input; `mask_incoming_index` is -1 when no mask
    layer was supplied.
    """
    # Record the data input's shape before MergeLayer takes over.
    self.input_shape = incoming.output_shape
    if mask_input is None:
        incomings = [incoming]
        self.mask_incoming_index = -1
    else:
        incomings = [incoming, mask_input]
        self.mask_incoming_index = 1
    super(CRFLayer, self).__init__(incomings, **kwargs)
    # One extra label is reserved for padding positions.
    self.num_labels = num_labels + 1
    self.pad_label_index = num_labels
    num_inputs = self.input_shape[2]
    # Input-conditioned transition scores plus a bias transition matrix.
    self.W = self.add_param(W, (num_inputs, self.num_labels, self.num_labels), name="W")
    self.b = (None if b is None else
              self.add_param(b, (self.num_labels, self.num_labels), name="b", regularizable=False))
示例4: __init__
# 需要導入模塊: from lasagne import init [as 別名]
# 或者: from lasagne.init import Constant [as 別名]
def __init__(self, incoming_vertex, incoming_edge, num_filters, filter_size, W=init.GlorotUniform(),
             b=init.Constant(0.), nonlinearity=nonlinearities.rectify, **kwargs):
    """Graph convolution layer merging a vertex input with an edge input."""
    self.vertex_shape = incoming_vertex.output_shape
    self.edge_shape = incoming_edge.output_shape
    # The vertex input acts as the primary input of this merge layer.
    self.input_shape = incoming_vertex.output_shape
    self.vertex_incoming_index = 0
    self.edge_incoming_index = 1
    super(GraphConvLayer, self).__init__([incoming_vertex, incoming_edge], **kwargs)
    self.nonlinearity = (nonlinearities.identity if nonlinearity is None
                         else nonlinearity)
    self.num_filters = num_filters
    self.filter_size = filter_size
    # Weight shape is delegated to get_W_shape(), defined on the class.
    self.W = self.add_param(W, self.get_W_shape(), name="W")
    self.b = (None if b is None else
              self.add_param(b, (num_filters,), name="b", regularizable=False))
示例5: __init__
# 需要導入模塊: from lasagne import init [as 別名]
# 或者: from lasagne.init import Constant [as 別名]
def __init__(self, incoming, W_h=init.GlorotUniform(), b_h=init.Constant(0.), W_t=init.GlorotUniform(),
             b_t=init.Constant(0.), nonlinearity=nonlinearities.rectify, **kwargs):
    """Highway-style dense layer holding two square weight/bias pairs.

    W_h/b_h and W_t/b_t are presumably the transform and gate parameters
    of a highway unit (the forward pass lives elsewhere) — both map the
    flattened input back onto itself, so the shapes are square.
    """
    super(HighwayDenseLayer, self).__init__(incoming, **kwargs)
    self.nonlinearity = (nonlinearities.identity if nonlinearity is None
                         else nonlinearity)
    num_inputs = int(np.prod(self.input_shape[1:]))
    # Create the two parameter pairs; iteration order matches the
    # original W_h, b_h, W_t, b_t registration order.
    for suffix, W_init, b_init in (('h', W_h, b_h), ('t', W_t, b_t)):
        setattr(self, 'W_' + suffix,
                self.add_param(W_init, (num_inputs, num_inputs),
                               name='W_' + suffix))
        if b_init is None:
            setattr(self, 'b_' + suffix, None)
        else:
            setattr(self, 'b_' + suffix,
                    self.add_param(b_init, (num_inputs,),
                                   name='b_' + suffix, regularizable=False))
示例6: __init__
# 需要導入模塊: from lasagne import init [as 別名]
# 或者: from lasagne.init import Constant [as 別名]
def __init__(self, incoming, num_labels, mask_input=None, W_h=init.GlorotUniform(), W_c=init.GlorotUniform(),
             b=init.Constant(0.), **kwargs):
    """Dependency-parser scoring layer with an optional sequence mask.

    Inherits from a MergeLayer so the mask can ride along as a second
    input; `mask_incoming_index` is -1 when no mask layer was supplied.
    """
    # Record the data input's shape before MergeLayer takes over.
    self.input_shape = incoming.output_shape
    if mask_input is None:
        incomings = [incoming]
        self.mask_incoming_index = -1
    else:
        incomings = [incoming, mask_input]
        self.mask_incoming_index = 1
    super(DepParserLayer, self).__init__(incomings, **kwargs)
    self.num_labels = num_labels
    num_inputs = self.input_shape[2]
    # Separate head (W_h) and child (W_c) projections plus a label bias.
    self.W_h = self.add_param(W_h, (num_inputs, self.num_labels), name='W_h')
    self.W_c = self.add_param(W_c, (num_inputs, self.num_labels), name='W_c')
    self.b = (None if b is None else
              self.add_param(b, (self.num_labels,), name='b', regularizable=False))
示例7: __init__
# 需要導入模塊: from lasagne import init [as 別名]
# 或者: from lasagne.init import Constant [as 別名]
def __init__(self, incoming, num_units, W=init.GlorotUniform(),
             b=init.Constant(0.), nonlinearity=nonlinearities.rectify,
             num_leading_axes=1, p=0.5, shared_axes=(), noise_samples=None,
             **kwargs):
    """Dense layer with built-in dropout configuration.

    Forwards the dense-layer arguments to the base class, then stores
    the dropout rate `p`, the axes sharing one dropout mask, and a
    random stream used to (re)generate noise samples.
    """
    super(DenseDropoutLayer, self).__init__(
        incoming, num_units, W, b, nonlinearity,
        num_leading_axes, **kwargs)
    # Dropout configuration.
    self.shared_axes = shared_axes
    self.p = p
    # Random number generator used for the dropout masks.
    self._srng = RandomStreams(get_rng().randint(1, 2147462579))
    # Pre-initialize the noise samples (may reuse the ones given).
    self.noise = self.init_noise(noise_samples)
示例8: __init__
# 需要導入模塊: from lasagne import init [as 別名]
# 或者: from lasagne.init import Constant [as 別名]
def __init__(self, incomings, nfilters, nrings=5, nrays=16,
             W=LI.GlorotNormal(), b=LI.Constant(0.0),
             normalize_rings=False, normalize_input=False, take_max=True,
             nonlinearity=LN.rectify, **kwargs):
    """Geodesic CNN layer: linear map y = Wx + b over a polar patch
    operator of `nrings` x `nrays` bins, followed by `nonlinearity`."""
    super(GCNNLayer, self).__init__(incomings, **kwargs)
    # Patch-operator geometry and the parameter shapes derived from it.
    self.nfilters = nfilters
    self.nrings = nrings
    self.nrays = nrays
    nchannels = self.input_shapes[0][1]
    self.filter_shape = (nfilters, nchannels, nrings, nrays)
    self.biases_shape = (nfilters,)
    # Behaviour flags for the forward pass (defined elsewhere).
    self.normalize_rings = normalize_rings
    self.normalize_input = normalize_input
    self.take_max = take_max
    self.nonlinearity = nonlinearity
    # Learnable weights and biases: x are input features, y outputs.
    self.W = self.add_param(W, self.filter_shape, name="W")
    self.b = self.add_param(b, self.biases_shape, name="b",
                            regularizable=False)
示例9: __init__
# 需要導入模塊: from lasagne import init [as 別名]
# 或者: from lasagne.init import Constant [as 別名]
def __init__(self, incoming, num_centers,
             locs=init.Normal(std=1), log_sigma=init.Constant(0.),
             **kwargs):
    """RBF layer with learnable center locations and a shared scalar
    log-bandwidth.

    Parameters
    ----------
    incoming : Layer
        Input layer; must produce a 2D (batch, features) output.
    num_centers : int
        Number of RBF centers.
    locs, log_sigma : initializer
        Initializers for the center matrix and the scalar log-sigma.

    Raises
    ------
    ValueError
        If the input is not 2-dimensional.
    """
    super(RBFLayer, self).__init__(incoming, **kwargs)
    self.num_centers = num_centers
    # FIX: validate with an explicit exception instead of `assert`,
    # which is silently stripped when Python runs with -O.
    if len(self.input_shape) != 2:
        raise ValueError(
            "RBFLayer expects a 2D input (batch, features); got input "
            "shape %r" % (self.input_shape,))
    in_dim = self.input_shape[1]
    self.locs = self.add_param(locs, (num_centers, in_dim), name='locs',
                               regularizable=False)
    # Scalar parameter: shape () gives a single shared bandwidth.
    self.log_sigma = self.add_param(log_sigma, (), name='log_sigma')
示例10: __init__
# 需要導入模塊: from lasagne import init [as 別名]
# 或者: from lasagne.init import Constant [as 別名]
def __init__(self, args, incoming, num_units, W=init.GlorotUniform(),
             b=init.Constant(0.), nonlinearity=nonlinearities.rectify,
             num_leading_axes=1, **kwargs):
    """Dense layer with optional per-weight L1/L2 regularization strengths.

    Parameters
    ----------
    args : object
        Configuration object with `regL1`/`regL2` flags and a `regInit`
        dict holding initial values for the per-weight penalty matrices.
    incoming, num_units, W, b, nonlinearity, num_leading_axes
        As in lasagne's standard DenseLayer.

    Raises
    ------
    ValueError
        If `num_leading_axes` is out of range or trailing axes are
        unspecified (None).
    """
    super(DenseLayerWithReg, self).__init__(incoming, **kwargs)
    self.nonlinearity = (nonlinearities.identity if nonlinearity is None
                         else nonlinearity)
    self.num_units = num_units
    if num_leading_axes >= len(self.input_shape):
        raise ValueError(
            "Got num_leading_axes=%d for a %d-dimensional input, "
            "leaving no trailing axes for the dot product." %
            (num_leading_axes, len(self.input_shape)))
    elif num_leading_axes < -len(self.input_shape):
        raise ValueError(
            "Got num_leading_axes=%d for a %d-dimensional input, "
            "requesting more trailing axes than there are input "
            "dimensions." % (num_leading_axes, len(self.input_shape)))
    self.num_leading_axes = num_leading_axes
    if any(s is None for s in self.input_shape[num_leading_axes:]):
        raise ValueError(
            "A DenseLayer requires a fixed input shape (except for "
            "the leading axes). Got %r for num_leading_axes=%d." %
            (self.input_shape, self.num_leading_axes))
    num_inputs = int(np.prod(self.input_shape[num_leading_axes:]))
    self.W = self.add_param(W, (num_inputs, num_units), name="W")
    if b is None:
        self.b = None
    else:
        self.b = self.add_param(b, (num_units,), name="b",
                                regularizable=False)
    # FIX: test truthiness instead of identity with True (`is True`),
    # so integer or numpy-bool flags (e.g. config values of 1) work too.
    if args.regL1:
        self.L1 = self.add_param(init.Constant(args.regInit['L1']),
                                 (num_inputs, num_units), name="L1")
    if args.regL2:
        self.L2 = self.add_param(init.Constant(args.regInit['L2']),
                                 (num_inputs, num_units), name="L2")
示例11: __init__
# 需要導入模塊: from lasagne import init [as 別名]
# 或者: from lasagne.init import Constant [as 別名]
def __init__(self, incoming, filter_size,
             init_std=5., W_logstd=None,
             stride=1, pad=0,
             nonlinearity=None,
             convolution=conv1d_mc0, **kwargs):
    """1D convolution whose filter is a Gaussian built from a learnable
    per-channel log standard deviation (see make_gaussian_filter)."""
    super(GaussianScan1DLayer, self).__init__(incoming, **kwargs)
    self.nonlinearity = (nonlinearities.identity if nonlinearity is None
                         else nonlinearity)
    self.filter_size = as_tuple(filter_size, 1)
    self.stride = as_tuple(stride, 1)
    self.convolution = convolution
    # Resolve padding: 'valid' means none, the named modes are kept as
    # strings for the forward pass, anything else is an explicit amount.
    if pad == 'valid':
        self.pad = (0,)
    elif pad in ('full', 'same', 'strictsame'):
        self.pad = pad
    else:
        self.pad = as_tuple(pad, 1, int)
    # Default log-std initializer: constant log(init_std), cast to the
    # configured float dtype first.
    if W_logstd is None:
        W_logstd = init.Constant(
            np.log(np.asarray(init_std, dtype=floatX)))
    self.num_input_channels = self.input_shape[1]
    # One learnable log-std per input channel.
    self.W_logstd = self.add_param(W_logstd,
                                   (self.num_input_channels,),
                                   name="W_logstd",
                                   regularizable=False)
    self.W = self.make_gaussian_filter()
示例12: __init__
# 需要導入模塊: from lasagne import init [as 別名]
# 或者: from lasagne.init import Constant [as 別名]
def __init__(self, incoming, axes='auto', epsilon=1e-4, alpha=0.1,
             beta=init.Constant(-3.0),
             mean=init.Constant(0), inv_std=init.Constant(1), **kwargs):
    """Batch-normalization variant with an extra (untrainable) beta.

    All three statistics parameters (beta, mean, inv_std) are created
    as untrainable, unregularized parameters over the non-normalized
    axes.
    """
    super(BatchNormSparseLayer, self).__init__(incoming, **kwargs)
    if axes == 'auto':
        # Default: normalize over all but the second (channel) axis.
        axes = (0,) + tuple(range(2, len(self.input_shape)))
    elif isinstance(axes, int):
        axes = (axes,)
    self.axes = axes
    self.epsilon = epsilon
    self.alpha = alpha
    # Parameter shape: every input dimension not normalized over.
    param_shape = [dim for ax, dim in enumerate(self.input_shape)
                   if ax not in self.axes]
    if any(dim is None for dim in param_shape):
        raise ValueError("BatchNormSparseLayer needs specified input sizes for "
                         "all axes not normalized over.")
    # Registration order matches the original: beta, mean, inv_std.
    for pname, pinit in (('beta', beta), ('mean', mean),
                         ('inv_std', inv_std)):
        setattr(self, pname,
                self.add_param(pinit, param_shape, pname,
                               trainable=False, regularizable=False))
示例13: batch_nmsp
# 需要導入模塊: from lasagne import init [as 別名]
# 或者: from lasagne.init import Constant [as 別名]
def batch_nmsp(layer, beta=init.Constant(-3.0), **kwargs):
    """Wrap `layer` in a BatchNormSparseLayer, mirroring lasagne's
    batch_norm helper: the wrapped layer's bias and nonlinearity are
    folded out and the nonlinearity is re-applied after normalization."""
    nonlinearity = getattr(layer, 'nonlinearity', None)
    if nonlinearity is not None:
        # Temporarily neutralize the nonlinearity; re-applied below.
        layer.nonlinearity = nonlinearities.identity
    # Drop the now-redundant bias (beta takes its role).
    if getattr(layer, 'b', None) is not None:
        del layer.params[layer.b]
        layer.b = None
    layer = BatchNormSparseLayer(layer, beta=beta, **kwargs)
    if nonlinearity is not None:
        from lasagne.layers import NonlinearityLayer
        layer = NonlinearityLayer(layer, nonlinearity)
    return layer