本文整理汇总了Python中lasagne.nonlinearities.rectify方法的典型用法代码示例。如果您正苦于以下问题:Python nonlinearities.rectify方法的具体用法?Python nonlinearities.rectify怎么用?Python nonlinearities.rectify使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类lasagne.nonlinearities
的用法示例。
在下文中一共展示了nonlinearities.rectify方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: build_discriminator_toy
# 需要导入模块: from lasagne import nonlinearities [as 别名]
# 或者: from lasagne.nonlinearities import rectify [as 别名]
def build_discriminator_toy(image=None, nd=512, GP_norm=None):
    """Build the MLP discriminator for the 2-D toy dataset.

    Parameters
    ----------
    image : Theano tensor, optional
        Input variable holding (batch, 2) toy samples.
    nd : int
        Width of each hidden fully-connected layer.
    GP_norm : bool, optional
        True when a gradient penalty is used; batch normalisation is then
        skipped (WGAN-GP critics must not use batch norm).

    Returns
    -------
    lasagne layer
        The single-unit sigmoid output layer of the discriminator.
    """
    Input = InputLayer(shape=(None, 2), input_var=image)
    print ("Dis input:", Input.output_shape)
    dis0 = DenseLayer(Input, nd, W=Normal(0.02), nonlinearity=relu)
    print ("Dis fc0:", dis0.output_shape)
    if GP_norm is True:
        dis1 = DenseLayer(dis0, nd, W=Normal(0.02), nonlinearity=relu)
    else:
        dis1 = batch_norm(DenseLayer(dis0, nd, W=Normal(0.02), nonlinearity=relu))
    print ("Dis fc1:", dis1.output_shape)
    # BUG FIX: this branch was inverted relative to dis1 — batch norm was
    # applied exactly when GP_norm was requested. Follow the dis1
    # convention: no batch norm under gradient penalty.
    if GP_norm is True:
        dis2 = DenseLayer(dis1, nd, W=Normal(0.02), nonlinearity=relu)
    else:
        dis2 = batch_norm(DenseLayer(dis1, nd, W=Normal(0.02), nonlinearity=relu))
    print ("Dis fc2:", dis2.output_shape)
    disout = DenseLayer(dis2, 1, W=Normal(0.02), nonlinearity=sigmoid)
    print ("Dis output:", disout.output_shape)
    return disout
示例2: build_generator_32
# 需要导入模块: from lasagne import nonlinearities [as 别名]
# 或者: from lasagne.nonlinearities import rectify [as 别名]
def build_generator_32(noise=None, ngf=128):
    """DCGAN-style generator producing 32x32 RGB images from 100-d noise.

    `ngf` scales the number of feature maps; the output layer uses tanh,
    so samples live in [-1, 1].
    """
    net = InputLayer(shape=(None, 100), input_var=noise)
    # Project the noise vector and reshape it to a (ngf*4, 4, 4) map.
    net = DenseLayer(net, ngf * 4 * 4 * 4, W=Normal(0.02), nonlinearity=relu)
    print ("Gen fc1:", net.output_shape)
    net = ReshapeLayer(net, ([0], ngf * 4, 4, 4))
    print ("Gen rs1:", net.output_shape)
    # Strided deconvolutions double the spatial size at each step.
    net = Deconv2DLayer(net, ngf * 2, (4, 4), (2, 2), crop=1,
                        W=Normal(0.02), nonlinearity=relu)
    print ("Gen deconv1:", net.output_shape)
    net = Deconv2DLayer(net, ngf, (4, 4), (2, 2), crop=1,
                        W=Normal(0.02), nonlinearity=relu)
    print ("Gen deconv2:", net.output_shape)
    # Final deconvolution maps the features to 3 tanh channels.
    net = Deconv2DLayer(net, 3, (4, 4), (2, 2), crop=1,
                        W=Normal(0.02), nonlinearity=tanh)
    print ("Gen output:", net.output_shape)
    return net
示例3: build_generator_64
# 需要导入模块: from lasagne import nonlinearities [as 别名]
# 或者: from lasagne.nonlinearities import rectify [as 别名]
def build_generator_64(noise=None, ngf=128):
    """DCGAN-style generator producing 64x64 RGB images from 100-d noise.

    Four strided deconvolutions upsample a (ngf*8, 4, 4) projection of the
    noise; a final same-padded 3x3 deconvolution emits tanh RGB output.
    """
    net = InputLayer(shape=(None, 100), input_var=noise)
    net = DenseLayer(net, ngf * 8 * 4 * 4, W=Normal(0.02), nonlinearity=relu)
    print ("Gen fc1:", net.output_shape)
    net = ReshapeLayer(net, ([0], ngf * 8, 4, 4))
    print ("Gen rs1:", net.output_shape)
    # Each stage doubles the spatial resolution.
    for stage, nfilt in enumerate((ngf * 8, ngf * 4, ngf * 4, ngf * 2), start=2):
        net = Deconv2DLayer(net, nfilt, (4, 4), (2, 2), crop=1,
                            W=Normal(0.02), nonlinearity=relu)
        print ("Gen deconv%d:" % stage, net.output_shape)
    # Resolution-preserving 3x3 deconvolution to 3 tanh channels.
    net = Deconv2DLayer(net, 3, (3, 3), (1, 1), crop='same',
                        W=Normal(0.02), nonlinearity=tanh)
    print ("Gen output:", net.output_shape)
    return net
示例4: network_classifier
# 需要导入模块: from lasagne import nonlinearities [as 别名]
# 或者: from lasagne.nonlinearities import rectify [as 别名]
def network_classifier(self, input_var):
    """Build a small conv-net classifier over 3x64x64 inputs.

    Returns a dict mapping layer names ('classifier/...') to lasagne
    layers, ending in a 10-way softmax output layer.
    """
    net = {}
    net['classifier/input'] = InputLayer(shape=(None, 3, 64, 64),
                                         input_var=input_var,
                                         name='classifier/input')
    prev = net['classifier/input']
    # Four identical conv(3x3, 32 filters) + maxpool(2x2) stages.
    for i in range(1, 5):
        conv_name = 'classifier/conv%d' % i
        pool_name = 'classifier/pool%d' % i
        net[conv_name] = Conv2DLayer(prev, num_filters=32, filter_size=3,
                                     stride=1, pad='valid',
                                     nonlinearity=rectify, name=conv_name)
        net[pool_name] = MaxPool2DLayer(net[conv_name], pool_size=2,
                                        stride=2, pad=0, name=pool_name)
        prev = net[pool_name]
    # Dense head: 64 rectified units into a 10-way softmax.
    net['classifier/dense1'] = DenseLayer(prev, num_units=64,
                                          nonlinearity=rectify,
                                          name='classifier/dense1')
    net['classifier/output'] = DenseLayer(net['classifier/dense1'],
                                          num_units=10,
                                          nonlinearity=softmax,
                                          name='classifier/output')
    return net
示例5: __init__
# 需要导入模块: from lasagne import nonlinearities [as 别名]
# 或者: from lasagne.nonlinearities import rectify [as 别名]
def __init__(self, incoming, num_filters, filter_size, stride=(1, 1),
             pad=0, untie_biases=False, groups=1,
             W=init.Uniform(), b=init.Constant(0.),
             nonlinearity=nl.rectify, flip_filters=True,
             convolution=T.nnet.conv2d, filter_dilation=(1, 1), **kwargs):
    """Grouped 2-D convolution layer.

    Splits the output filters into `groups` independent groups, so
    `num_filters` must divide evenly by `groups`. Every other argument
    is forwarded unchanged to the base convolution constructor.
    """
    # Each group must receive the same number of filters.
    assert num_filters % groups == 0
    self.groups = groups
    super(GroupConv2DLayer, self).__init__(
        incoming, num_filters, filter_size, stride=stride, pad=pad,
        untie_biases=untie_biases, W=W, b=b, nonlinearity=nonlinearity,
        flip_filters=flip_filters, convolution=convolution,
        filter_dilation=filter_dilation, **kwargs)
示例6: __init__
# 需要导入模块: from lasagne import nonlinearities [as 别名]
# 或者: from lasagne.nonlinearities import rectify [as 别名]
def __init__(self, incoming_vertex, incoming_edge, num_filters, filter_size, W=init.GlorotUniform(),
             b=init.Constant(0.), nonlinearity=nonlinearities.rectify, **kwargs):
    """Graph convolution layer over a vertex input and an edge input.

    The vertex features define this layer's own input shape; `b=None`
    disables the bias term, and `nonlinearity=None` means identity.
    """
    # Record the shapes of both incoming layers before merging them.
    self.vertex_shape = incoming_vertex.output_shape
    self.edge_shape = incoming_edge.output_shape
    self.input_shape = incoming_vertex.output_shape
    # Fixed positions of the two inputs inside the merged incomings.
    self.vertex_incoming_index = 0
    self.edge_incoming_index = 1
    super(GraphConvLayer, self).__init__([incoming_vertex, incoming_edge], **kwargs)
    self.nonlinearity = nonlinearities.identity if nonlinearity is None else nonlinearity
    self.num_filters = num_filters
    self.filter_size = filter_size
    # Trainable parameters: weights always, bias only when requested.
    self.W = self.add_param(W, self.get_W_shape(), name="W")
    self.b = None if b is None else self.add_param(
        b, (num_filters,), name="b", regularizable=False)
示例7: __init__
# 需要导入模块: from lasagne import nonlinearities [as 别名]
# 或者: from lasagne.nonlinearities import rectify [as 别名]
def __init__(self, incoming, W_h=init.GlorotUniform(), b_h=init.Constant(0.), W_t=init.GlorotUniform(),
             b_t=init.Constant(0.), nonlinearity=nonlinearities.rectify, **kwargs):
    """Highway dense layer with a transform path (t) and a carry path.

    Both weight matrices are square over the flattened trailing input
    dimensions; passing `b_h=None` / `b_t=None` disables that bias.
    """
    super(HighwayDenseLayer, self).__init__(incoming, **kwargs)
    if nonlinearity is None:
        self.nonlinearity = nonlinearities.identity
    else:
        self.nonlinearity = nonlinearity
    # Input and output widths match: flatten everything past the batch axis.
    num_inputs = int(np.prod(self.input_shape[1:]))
    # Parameter registration order preserved: W_h, b_h, W_t, b_t.
    self.W_h = self.add_param(W_h, (num_inputs, num_inputs), name="W_h")
    self.b_h = None if b_h is None else self.add_param(
        b_h, (num_inputs,), name="b_h", regularizable=False)
    self.W_t = self.add_param(W_t, (num_inputs, num_inputs), name="W_t")
    self.b_t = None if b_t is None else self.add_param(
        b_t, (num_inputs,), name="b_t", regularizable=False)
示例8: initialization
# 需要导入模块: from lasagne import nonlinearities [as 别名]
# 或者: from lasagne.nonlinearities import rectify [as 别名]
def initialization(name):
    """Return a HeNormal initializer with a gain suited to *name*.

    Saturating or linear-ish activations get gain 1.0; rectifier
    variants use the He gain sqrt(2 / (1 + a**2)) for leak slope a.
    Raises KeyError for an unknown activation name.
    """
    relu_gain = math.sqrt(2)
    table = {
        'sigmoid': init.HeNormal(gain=1.0),
        'softmax': init.HeNormal(gain=1.0),
        'elu': init.HeNormal(gain=1.0),
        'relu': init.HeNormal(gain=relu_gain),
        'lrelu': init.HeNormal(gain=math.sqrt(2/(1+0.01**2))),
        'vlrelu': init.HeNormal(gain=math.sqrt(2/(1+0.33**2))),
        'rectify': init.HeNormal(gain=relu_gain),
        'identity': init.HeNormal(gain=relu_gain),
    }
    return table[name]
#################### BASELINE MODEL #####################
示例9: __init__
# 需要导入模块: from lasagne import nonlinearities [as 别名]
# 或者: from lasagne.nonlinearities import rectify [as 别名]
def __init__(self, incoming, num_units, W=init.GlorotUniform(),
             b=init.Constant(0.), nonlinearity=nonlinearities.rectify,
             num_leading_axes=1, p=0.5, shared_axes=(), noise_samples=None,
             **kwargs):
    """Dense layer with built-in dropout on its activations.

    `p` is the drop probability, `shared_axes` lists axes that share one
    dropout mask, and `noise_samples` optionally supplies a pre-drawn mask.
    """
    super(DenseDropoutLayer, self).__init__(
        incoming, num_units, W, b, nonlinearity,
        num_leading_axes, **kwargs)
    self.p = p
    self.shared_axes = shared_axes
    # Seed a Theano random stream for drawing dropout masks.
    self._srng = RandomStreams(get_rng().randint(1, 2147462579))
    # Normalise/store the (optional) pre-drawn noise.
    self.noise = self.init_noise(noise_samples)
示例10: __init__
# 需要导入模块: from lasagne import nonlinearities [as 别名]
# 或者: from lasagne.nonlinearities import rectify [as 别名]
def __init__(self, incomings, nfilters, nrings=5, nrays=16,
             W=LI.GlorotNormal(), b=LI.Constant(0.0),
             normalize_rings=False, normalize_input=False, take_max=True,
             nonlinearity=LN.rectify, **kwargs):
    """Geodesic CNN layer computing y = Wx + b over polar patch features.

    The patch operator is parameterised by `nrings` radial bins and
    `nrays` angular bins per patch.
    """
    super(GCNNLayer, self).__init__(incomings, **kwargs)
    # Patch-operator geometry.
    self.nfilters, self.nrings, self.nrays = nfilters, nrings, nrays
    self.filter_shape = (nfilters, self.input_shapes[0][1], nrings, nrays)
    self.biases_shape = (nfilters, )
    # Patch-operator behaviour flags.
    self.normalize_rings = normalize_rings
    self.normalize_input = normalize_input
    self.take_max = take_max
    self.nonlinearity = nonlinearity
    # Trainable parameters of the affine map y = Wx + b.
    self.W = self.add_param(W, self.filter_shape, name="W")
    self.b = self.add_param(b, self.biases_shape, name="b", regularizable=False)
示例11: build_network_from_ae
# 需要导入模块: from lasagne import nonlinearities [as 别名]
# 或者: from lasagne.nonlinearities import rectify [as 别名]
def build_network_from_ae(classn):
    """Rebuild a classifier on top of a pickled convolutional autoencoder.

    Loads the pretrained AE, grafts a fresh 4x32x32 input layer onto its
    first real layer, then feeds the AE's encode layer (concatenated with
    an augmentation input) through a small dense head.

    Parameters
    ----------
    classn : int
        Number of output classes (also the width of the augmentation input).

    Returns
    -------
    tuple
        (network, encode_layer, input_var, aug_var, target_var)
    """
    input_var = T.tensor4('inputs')
    aug_var = T.matrix('aug_var')
    target_var = T.matrix('targets')
    # BUG FIX: close the model file deterministically instead of leaking
    # the handle from pickle.load(open(...)).
    with open('model_4ch/conv_ae.pkl', 'rb') as f:
        ae = pickle.load(f)
    # BUG FIX: map() returns an iterator on Python 3 which has no
    # .index(); materialise the layer names once with a comprehension.
    layer_names = [pair[0] for pair in ae.layers]
    input_layer_index = layer_names.index('input')
    first_layer = ae.get_all_layers()[input_layer_index + 1]
    # Replace the AE's original input with a fresh 4-channel input layer.
    input_layer = layers.InputLayer(shape=(None, 4, 32, 32), input_var=input_var)
    first_layer.input_layer = input_layer
    encode_layer_index = layer_names.index('encode_layer')
    encode_layer = ae.get_all_layers()[encode_layer_index]
    # Concatenate the code with the augmentation features.
    aug_layer = layers.InputLayer(shape=(None, classn), input_var=aug_var)
    cat_layer = lasagne.layers.ConcatLayer([encode_layer, aug_layer], axis=1)
    hidden_layer = layers.DenseLayer(incoming=cat_layer, num_units=100, nonlinearity=rectify)
    network = layers.DenseLayer(incoming=hidden_layer, num_units=classn, nonlinearity=sigmoid)
    return network, encode_layer, input_var, aug_var, target_var
示例12: build_network_from_ae
# 需要导入模块: from lasagne import nonlinearities [as 别名]
# 或者: from lasagne.nonlinearities import rectify [as 别名]
def build_network_from_ae(classn):
    """Rebuild a two-headed classifier on top of a pickled conv autoencoder.

    Loads the pretrained AE, grafts a fresh 3x32x32 input layer onto its
    first real layer, then feeds the AE's encode layer (concatenated with
    an augmentation input) through a dense head with a 12-unit and a
    7-unit sigmoid output, concatenated along axis 1.

    Parameters
    ----------
    classn : int
        Width of the augmentation input layer.

    Returns
    -------
    tuple
        (network, encode_layer, input_var, aug_var, target_var)
    """
    input_var = T.tensor4('inputs')
    aug_var = T.matrix('aug_var')
    target_var = T.matrix('targets')
    # BUG FIX: close the model file deterministically instead of leaking
    # the handle from pickle.load(open(...)).
    with open('model/conv_ae.pkl', 'rb') as f:
        ae = pickle.load(f)
    # BUG FIX: map() returns an iterator on Python 3 which has no
    # .index(); materialise the layer names once with a comprehension.
    layer_names = [pair[0] for pair in ae.layers]
    input_layer_index = layer_names.index('input')
    first_layer = ae.get_all_layers()[input_layer_index + 1]
    # Replace the AE's original input with a fresh 3-channel input layer.
    input_layer = layers.InputLayer(shape=(None, 3, 32, 32), input_var=input_var)
    first_layer.input_layer = input_layer
    encode_layer_index = layer_names.index('encode_layer')
    encode_layer = ae.get_all_layers()[encode_layer_index]
    # Concatenate the code with the augmentation features.
    aug_layer = layers.InputLayer(shape=(None, classn), input_var=aug_var)
    cat_layer = lasagne.layers.ConcatLayer([encode_layer, aug_layer], axis=1)
    hidden_layer = layers.DenseLayer(incoming=cat_layer, num_units=100, nonlinearity=rectify)
    # Two sigmoid heads (multi-label and single-label), merged on axis 1.
    network_mll = layers.DenseLayer(incoming=hidden_layer, num_units=12, nonlinearity=sigmoid)
    network_sll = layers.DenseLayer(incoming=hidden_layer, num_units=7, nonlinearity=sigmoid)
    network = lasagne.layers.ConcatLayer([network_mll, network_sll], axis=1)
    return network, encode_layer, input_var, aug_var, target_var
示例13: affine_relu_conv
# 需要导入模块: from lasagne import nonlinearities [as 别名]
# 或者: from lasagne.nonlinearities import rectify [as 别名]
def affine_relu_conv(network, channels, filter_size, dropout, name_prefix):
    """Scale/shift -> ReLU -> same-padded conv block.

    The convolution carries no bias and no nonlinearity of its own; a
    dropout layer is appended only when `dropout` is non-zero.
    """
    affine = BiasLayer(ScaleLayer(network, name=name_prefix + '_scale'),
                       name=name_prefix + '_shift')
    activated = NonlinearityLayer(affine, nonlinearity=rectify,
                                  name=name_prefix + '_relu')
    conv = Conv2DLayer(activated, channels, filter_size, pad='same',
                       W=lasagne.init.HeNormal(gain='relu'),
                       b=None, nonlinearity=None,
                       name=name_prefix + '_conv')
    return DropoutLayer(conv, dropout) if dropout else conv
示例14: bn_relu_conv
# 需要导入模块: from lasagne import nonlinearities [as 别名]
# 或者: from lasagne.nonlinearities import rectify [as 别名]
def bn_relu_conv(network, channels, filter_size, dropout, name_prefix):
    """BatchNorm -> ReLU -> same-padded conv block, optionally + dropout.

    The convolution itself has no bias and no nonlinearity; all layers
    are named with `name_prefix` for later lookup.
    """
    out = BatchNormLayer(network, name=name_prefix + '_bn')
    out = NonlinearityLayer(out, nonlinearity=rectify,
                            name=name_prefix + '_relu')
    out = Conv2DLayer(out, channels, filter_size, pad='same',
                      W=lasagne.init.HeNormal(gain='relu'),
                      b=None, nonlinearity=None,
                      name=name_prefix + '_conv')
    if dropout:
        out = DropoutLayer(out, dropout)
    return out
示例15: build_generator_toy
# 需要导入模块: from lasagne import nonlinearities [as 别名]
# 或者: from lasagne.nonlinearities import rectify [as 别名]
def build_generator_toy(noise=None, nd=512):
    """Three-hidden-layer MLP generator for the 2-D toy dataset.

    Maps 2-D noise through `nd`-wide ReLU layers to a linear 2-D output
    (no squashing on the final layer).
    """
    net = InputLayer(shape=(None, 2), input_var=noise)
    print ("Gen input:", net.output_shape)
    net = DenseLayer(net, nd, W=Normal(0.02), nonlinearity=relu)
    print ("Gen fc0:", net.output_shape)
    net = DenseLayer(net, nd, W=Normal(0.02), nonlinearity=relu)
    print ("Gen fc1:", net.output_shape)
    net = DenseLayer(net, nd, W=Normal(0.02), nonlinearity=relu)
    print ("Gen fc2:", net.output_shape)
    # Linear output layer: raw 2-D coordinates.
    net = DenseLayer(net, 2, W=Normal(0.02), nonlinearity=None)
    print ("Gen output:", net.output_shape)
    return net