This article collects typical usage examples of the Python method lasagne.nonlinearities.softmax. If you have been wondering what exactly nonlinearities.softmax does and how to use it, the curated code samples below should help. You can also read more about the containing module, lasagne.nonlinearities, for context.
Fifteen code examples of nonlinearities.softmax are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python examples.
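Before diving in, here is a minimal sketch of the pattern every example below shares: softmax is passed as the nonlinearity of the final DenseLayer so the network outputs a probability distribution over classes. The sizes here are illustrative, not taken from any particular example.

import theano.tensor as T
from lasagne.layers import InputLayer, DenseLayer, get_output
from lasagne.nonlinearities import softmax

x = T.matrix('x')
l_in = InputLayer(shape=(None, 100), input_var=x)             # 100 input features (illustrative)
l_out = DenseLayer(l_in, num_units=10, nonlinearity=softmax)  # 10 class probabilities
probs = get_output(l_out)                                     # symbolic (batch, 10) tensor; rows sum to 1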
Example 1: network_classifier

# Required import: from lasagne import nonlinearities [as alias]
# Or: from lasagne.nonlinearities import softmax [as alias]
def network_classifier(self, input_var):
    network = {}
    network['classifier/input'] = InputLayer(shape=(None, 3, 64, 64), input_var=input_var, name='classifier/input')
    network['classifier/conv1'] = Conv2DLayer(network['classifier/input'], num_filters=32, filter_size=3, stride=1, pad='valid', nonlinearity=rectify, name='classifier/conv1')
    network['classifier/pool1'] = MaxPool2DLayer(network['classifier/conv1'], pool_size=2, stride=2, pad=0, name='classifier/pool1')
    network['classifier/conv2'] = Conv2DLayer(network['classifier/pool1'], num_filters=32, filter_size=3, stride=1, pad='valid', nonlinearity=rectify, name='classifier/conv2')
    network['classifier/pool2'] = MaxPool2DLayer(network['classifier/conv2'], pool_size=2, stride=2, pad=0, name='classifier/pool2')
    network['classifier/conv3'] = Conv2DLayer(network['classifier/pool2'], num_filters=32, filter_size=3, stride=1, pad='valid', nonlinearity=rectify, name='classifier/conv3')
    network['classifier/pool3'] = MaxPool2DLayer(network['classifier/conv3'], pool_size=2, stride=2, pad=0, name='classifier/pool3')
    network['classifier/conv4'] = Conv2DLayer(network['classifier/pool3'], num_filters=32, filter_size=3, stride=1, pad='valid', nonlinearity=rectify, name='classifier/conv4')
    network['classifier/pool4'] = MaxPool2DLayer(network['classifier/conv4'], pool_size=2, stride=2, pad=0, name='classifier/pool4')
    network['classifier/dense1'] = DenseLayer(network['classifier/pool4'], num_units=64, nonlinearity=rectify, name='classifier/dense1')
    network['classifier/output'] = DenseLayer(network['classifier/dense1'], num_units=10, nonlinearity=softmax, name='classifier/output')
    return network
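A hedged sketch of how this dictionary-style network might be used for inference; the `model` instance and the compile step are assumptions, not part of the original snippet:

import theano
import theano.tensor as T
import lasagne

input_var = T.tensor4('inputs')
net = model.network_classifier(input_var)   # `model` = the object defining the method above (hypothetical)
probs = lasagne.layers.get_output(net['classifier/output'], deterministic=True)
predict_fn = theano.function([input_var], probs)  # maps a (batch, 3, 64, 64) array to (batch, 10) probabilities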
Example 2: initialization

# Required import: from lasagne import nonlinearities [as alias]
# Or: from lasagne.nonlinearities import softmax [as alias]
def initialization(name):
    # He initializations matched to each activation; gains follow He et al. (2015).
    initializations = {'sigmoid': init.HeNormal(gain=1.0),
                       'softmax': init.HeNormal(gain=1.0),
                       'elu': init.HeNormal(gain=1.0),
                       'relu': init.HeNormal(gain=math.sqrt(2)),
                       'lrelu': init.HeNormal(gain=math.sqrt(2 / (1 + 0.01**2))),
                       'vlrelu': init.HeNormal(gain=math.sqrt(2 / (1 + 0.33**2))),
                       'rectify': init.HeNormal(gain=math.sqrt(2)),
                       'identity': init.HeNormal(gain=math.sqrt(2))}
    return initializations[name]
#################### BASELINE MODEL #####################
Example 3: build_network_from_ae

# Required import: from lasagne import nonlinearities [as alias]
# Or: from lasagne.nonlinearities import softmax [as alias]
def build_network_from_ae(classn):
    input_var = T.tensor4('inputs')
    aug_var = T.matrix('aug_var')
    target_var = T.matrix('targets')
    ae = pickle.load(open('model/conv_ae.pkl', 'rb'))
    # ae.layers is a list of (name, layer) pairs; look layers up by name.
    # (The original used Python 2's map(...).index(...); a list comprehension works in both versions.)
    input_layer_index = [pair[0] for pair in ae.layers].index('input')
    first_layer = ae.get_all_layers()[input_layer_index + 1]
    input_layer = layers.InputLayer(shape=(None, 3, 32, 32), input_var=input_var)
    first_layer.input_layer = input_layer
    encode_layer_index = [pair[0] for pair in ae.layers].index('encode_layer')
    encode_layer = ae.get_all_layers()[encode_layer_index]
    aug_layer = layers.InputLayer(shape=(None, classn), input_var=aug_var)
    cat_layer = lasagne.layers.ConcatLayer([encode_layer, aug_layer], axis=1)
    hidden_layer = layers.DenseLayer(incoming=cat_layer, num_units=100, nonlinearity=rectify)
    # Multi-label head (sigmoid) and single-label head (softmax), concatenated into one output.
    network_mll = layers.DenseLayer(incoming=hidden_layer, num_units=12, nonlinearity=sigmoid)
    network_sll = layers.DenseLayer(incoming=hidden_layer, num_units=7, nonlinearity=softmax)
    network = lasagne.layers.ConcatLayer([network_mll, network_sll], axis=1)
    return network, encode_layer, input_var, aug_var, target_var
Example 4: create_network

# Required import: from lasagne import nonlinearities [as alias]
# Or: from lasagne.nonlinearities import softmax [as alias]
def create_network():
    l = 1000
    pool_size = 5
    test_size1 = 13
    test_size2 = 7
    test_size3 = 5
    kernel1 = 128
    kernel2 = 128
    kernel3 = 128
    layer1 = InputLayer(shape=(None, 1, 4, l + 1024))
    # Split the input along the last axis: the first l columns are the sequence,
    # the remaining 1024 are auxiliary features.
    layer2_1 = SliceLayer(layer1, indices=slice(0, l), axis=-1)
    layer2_2 = SliceLayer(layer1, indices=slice(l, None), axis=-1)
    layer2_3 = SliceLayer(layer2_2, indices=slice(0, 4), axis=-2)
    layer2_f = FlattenLayer(layer2_3)
    layer3 = Conv2DLayer(layer2_1, num_filters=kernel1, filter_size=(4, test_size1))
    layer4 = Conv2DLayer(layer3, num_filters=kernel1, filter_size=(1, test_size1))
    layer5 = Conv2DLayer(layer4, num_filters=kernel1, filter_size=(1, test_size1))
    layer6 = MaxPool2DLayer(layer5, pool_size=(1, pool_size))
    layer7 = Conv2DLayer(layer6, num_filters=kernel2, filter_size=(1, test_size2))
    layer8 = Conv2DLayer(layer7, num_filters=kernel2, filter_size=(1, test_size2))
    layer9 = Conv2DLayer(layer8, num_filters=kernel2, filter_size=(1, test_size2))
    layer10 = MaxPool2DLayer(layer9, pool_size=(1, pool_size))
    layer11 = Conv2DLayer(layer10, num_filters=kernel3, filter_size=(1, test_size3))
    layer12 = Conv2DLayer(layer11, num_filters=kernel3, filter_size=(1, test_size3))
    layer13 = Conv2DLayer(layer12, num_filters=kernel3, filter_size=(1, test_size3))
    layer14 = MaxPool2DLayer(layer13, pool_size=(1, pool_size))
    layer14_d = DenseLayer(layer14, num_units=256)
    layer3_2 = DenseLayer(layer2_f, num_units=128)
    layer15 = ConcatLayer([layer14_d, layer3_2])
    layer16 = DropoutLayer(layer15, p=0.5)
    layer17 = DenseLayer(layer16, num_units=256)
    network = DenseLayer(layer17, num_units=2, nonlinearity=softmax)
    return network
# random search to initialize the weights
Example 5: network_discriminator

# Required import: from lasagne import nonlinearities [as alias]
# Or: from lasagne.nonlinearities import softmax [as alias]
def network_discriminator(self, features):
    network = {}
    network['discriminator/conv2'] = Conv2DLayer(features, num_filters=32, filter_size=3, stride=1, pad='valid', nonlinearity=rectify, name='discriminator/conv2')
    network['discriminator/pool2'] = MaxPool2DLayer(network['discriminator/conv2'], pool_size=2, stride=2, pad=0, name='discriminator/pool2')
    network['discriminator/conv3'] = Conv2DLayer(network['discriminator/pool2'], num_filters=32, filter_size=3, stride=1, pad='valid', nonlinearity=rectify, name='discriminator/conv3')
    network['discriminator/pool3'] = MaxPool2DLayer(network['discriminator/conv3'], pool_size=2, stride=2, pad=0, name='discriminator/pool3')
    network['discriminator/conv4'] = Conv2DLayer(network['discriminator/pool3'], num_filters=32, filter_size=3, stride=1, pad='valid', nonlinearity=rectify, name='discriminator/conv4')
    network['discriminator/pool4'] = MaxPool2DLayer(network['discriminator/conv4'], pool_size=2, stride=2, pad=0, name='discriminator/pool4')
    network['discriminator/dense1'] = DenseLayer(network['discriminator/pool4'], num_units=64, nonlinearity=rectify, name='discriminator/dense1')
    network['discriminator/output'] = DenseLayer(network['discriminator/dense1'], num_units=2, nonlinearity=softmax, name='discriminator/output')
    return network
Example 6: __init__

# Required import: from lasagne import nonlinearities [as alias]
# Or: from lasagne.nonlinearities import softmax [as alias]
def __init__(
        self,
        env_spec,
        hidden_sizes=(32, 32),
        hidden_nonlinearity=NL.tanh,
        output_b_init=None,
        weight_signal=1.0,
        weight_nonsignal=1.0,
        weight_smc=1.0):
    """
    :param env_spec: A spec for the MDP.
    :param hidden_sizes: list of sizes for the fully connected hidden layers
    :param hidden_nonlinearity: nonlinearity used for each hidden layer
    :return:
    """
    Serializable.quick_init(self, locals())
    assert isinstance(env_spec.action_space, Discrete)
    output_b_init = compute_output_b_init(env_spec.action_space.names,
                                          output_b_init, weight_signal, weight_nonsignal, weight_smc)
    # Softmax output turns the MLP's logits into a categorical action distribution.
    prob_network = MLP(
        input_shape=(env_spec.observation_space.flat_dim,),
        output_dim=env_spec.action_space.n,
        hidden_sizes=hidden_sizes,
        hidden_nonlinearity=hidden_nonlinearity,
        output_nonlinearity=NL.softmax,
        output_b_init=output_b_init
    )
    super(InitCategoricalMLPPolicy, self).__init__(env_spec, hidden_sizes,
                                                   hidden_nonlinearity, prob_network)
# Modified from RLLab GRUNetwork
Example 7: calc_loss

# Required import: from lasagne import nonlinearities [as alias]
# Or: from lasagne.nonlinearities import softmax [as alias]
def calc_loss(prediction, targets):
    # Categorical crossentropy is the best choice for a multi-class softmax output.
    loss = T.mean(objectives.categorical_crossentropy(prediction, targets))
    return loss
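A sketch of how calc_loss is typically wired into a training function; the network, input variable, and optimizer choice are assumptions, not taken from the source repo:

import theano
import theano.tensor as T
import lasagne

targets = T.matrix('targets')                     # one-hot targets, shape (batch, n_classes)
prediction = lasagne.layers.get_output(network)   # `network` is assumed to end in a softmax DenseLayer
loss = calc_loss(prediction, targets)
params = lasagne.layers.get_all_params(network, trainable=True)
updates = lasagne.updates.adam(loss, params)      # optimizer choice is illustrative
train_fn = theano.function([input_var, targets], loss, updates=updates)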
Example 8: ResNet_FullPreActivation

# Required import: from lasagne import nonlinearities [as alias]
# Or: from lasagne.nonlinearities import softmax [as alias]
def ResNet_FullPreActivation(input_shape=(None, 3, PIXELS, PIXELS), input_var=None, n_classes=10, n=18):
    """
    Adapted from https://github.com/Lasagne/Recipes/tree/master/papers/deep_residual_learning.
    Tweaked to be consistent with 'Identity Mappings in Deep Residual Networks',
    Kaiming He et al. 2016 (https://arxiv.org/abs/1603.05027).
    Depth is 6n + 2, so the default n=18 gives a 110-layer network.
    """
    # Building the network
    l_in = InputLayer(shape=input_shape, input_var=input_var)
    # first layer, output is 16 x 32 x 32
    l = batch_norm(ConvLayer(l_in, num_filters=16, filter_size=(3, 3), stride=(1, 1), nonlinearity=rectify, pad='same', W=he_norm))
    # first stack of residual blocks, output is 16 x 32 x 32
    l = residual_block(l, first=True)
    for _ in range(1, n):
        l = residual_block(l)
    # second stack of residual blocks, output is 32 x 16 x 16
    l = residual_block(l, increase_dim=True)
    for _ in range(1, n):
        l = residual_block(l)
    # third stack of residual blocks, output is 64 x 8 x 8
    l = residual_block(l, increase_dim=True)
    for _ in range(1, n):
        l = residual_block(l)
    # final batch norm + ReLU (full pre-activation leaves no nonlinearity after the last residual add)
    bn_post_conv = BatchNormLayer(l)
    bn_post_relu = NonlinearityLayer(bn_post_conv, rectify)
    # average pooling
    avg_pool = GlobalPoolLayer(bn_post_relu)
    # fully connected layer
    network = DenseLayer(avg_pool, num_units=n_classes, W=HeNormal(), nonlinearity=softmax)
    return network
Example 9: nonlinearity

# Required import: from lasagne import nonlinearities [as alias]
# Or: from lasagne.nonlinearities import softmax [as alias]
def nonlinearity(name):
    nonlinearities = {'rectify': nl.rectify,
                      'relu': nl.rectify,
                      'lrelu': nl.LeakyRectify(0.01),
                      'vlrelu': nl.LeakyRectify(0.33),
                      'elu': nl.elu,
                      'softmax': nl.softmax,
                      'sigmoid': nl.sigmoid,
                      'identity': nl.identity}
    return nonlinearities[name]
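Examples 2 and 9 appear designed to be used together: the same name selects both the activation and a matching He initialization (gain 1.0 for softmax, per example 2). A minimal sketch, with `incoming_layer` and the unit count as placeholders:

from lasagne.layers import DenseLayer

name = 'softmax'
output_layer = DenseLayer(incoming_layer, num_units=10,
                          nonlinearity=nonlinearity(name),
                          W=initialization(name))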
Example 10: calc_loss

# Required import: from lasagne import nonlinearities [as alias]
# Or: from lasagne.nonlinearities import softmax [as alias]
def calc_loss(prediction, targets):
    # Categorical crossentropy is the best choice for a multi-class softmax output.
    loss = T.mean(objectives.categorical_crossentropy(prediction, targets))
    return loss
Example 11: __init__

# Required import: from lasagne import nonlinearities [as alias]
# Or: from lasagne.nonlinearities import softmax [as alias]
def __init__(self, x, y, args):
    self.params_theta = []
    self.params_lambda = []
    self.params_weight = []
    if args.dataset == 'mnist':
        input_size = (None, 1, 28, 28)
    elif args.dataset == 'cifar10':
        input_size = (None, 3, 32, 32)
    else:
        raise AssertionError
    layers = [ll.InputLayer(input_size)]
    self.penalty = theano.shared(np.array(0.))
    # conv1
    layers.append(Conv2DLayerWithReg(args, layers[-1], 20, 5))
    self.add_params_to_self(args, layers[-1])
    layers.append(ll.MaxPool2DLayer(layers[-1], pool_size=2, stride=2))
    # conv2
    layers.append(Conv2DLayerWithReg(args, layers[-1], 50, 5))
    self.add_params_to_self(args, layers[-1])
    layers.append(ll.MaxPool2DLayer(layers[-1], pool_size=2, stride=2))
    # fc1
    layers.append(DenseLayerWithReg(args, layers[-1], num_units=500))
    self.add_params_to_self(args, layers[-1])
    # softmax output
    layers.append(DenseLayerWithReg(args, layers[-1], num_units=10, nonlinearity=nonlinearities.softmax))
    self.add_params_to_self(args, layers[-1])
    self.layers = layers
    self.y = ll.get_output(layers[-1], x, deterministic=False)
    self.prediction = T.argmax(self.y, axis=1)
    print(self.params_lambda)
    # cost function: crossentropy plus the accumulated regularization penalty
    self.loss = T.mean(categorical_crossentropy(self.y, y))
    self.lossWithPenalty = T.add(self.loss, self.penalty)
    print("loss and losswithpenalty", type(self.loss), type(self.lossWithPenalty))
Example 12: build_model

# Required import: from lasagne import nonlinearities [as alias]
# Or: from lasagne.nonlinearities import softmax [as alias]
def build_model(self, input_dim):
    # Three hidden layers with a bottleneck in the middle: n_hidden -> n_hidden/2 -> n_hidden.
    # Floor division keeps num_units an integer (the original relied on Python 2's integer /).
    l_in = InputLayer(shape=(self.batch_size, input_dim))
    l_hidden1 = DenseLayer(l_in, num_units=self.n_hidden, nonlinearity=rectify)
    l_hidden1_dropout = DropoutLayer(l_hidden1, p=self.dropout)
    l_hidden2 = DenseLayer(l_hidden1_dropout, num_units=self.n_hidden // 2, nonlinearity=rectify)
    l_hidden2_dropout = DropoutLayer(l_hidden2, p=self.dropout)
    l_hidden3 = DenseLayer(l_hidden2_dropout, num_units=self.n_hidden, nonlinearity=rectify)
    l_hidden3_dropout = DropoutLayer(l_hidden3, p=self.dropout)
    l_out = DenseLayer(l_hidden3_dropout, num_units=self.n_classes_, nonlinearity=softmax)
    return l_out
Example 13: build_model

# Required import: from lasagne import nonlinearities [as alias]
# Or: from lasagne.nonlinearities import softmax [as alias]
def build_model(self, input_dim):
    # Pyramid variant: hidden widths shrink n_hidden -> n_hidden/2 -> n_hidden/4.
    l_in = InputLayer(shape=(self.batch_size, input_dim))
    l_hidden1 = DenseLayer(l_in, num_units=self.n_hidden, nonlinearity=rectify)
    l_hidden1_dropout = DropoutLayer(l_hidden1, p=self.dropout)
    l_hidden2 = DenseLayer(l_hidden1_dropout, num_units=self.n_hidden // 2, nonlinearity=rectify)
    l_hidden2_dropout = DropoutLayer(l_hidden2, p=self.dropout)
    l_hidden3 = DenseLayer(l_hidden2_dropout, num_units=self.n_hidden // 4, nonlinearity=rectify)
    l_hidden3_dropout = DropoutLayer(l_hidden3, p=self.dropout)
    l_out = DenseLayer(l_hidden3_dropout, num_units=self.n_classes_, nonlinearity=softmax)
    return l_out
Example 14: build_model

# Required import: from lasagne import nonlinearities [as alias]
# Or: from lasagne.nonlinearities import softmax [as alias]
def build_model(self, input_dim):
    # Constant-width variant: all three hidden layers have n_hidden units.
    l_in = InputLayer(shape=(self.batch_size, input_dim))
    l_hidden1 = DenseLayer(l_in, num_units=self.n_hidden, nonlinearity=rectify)
    l_hidden1_dropout = DropoutLayer(l_hidden1, p=self.dropout)
    l_hidden2 = DenseLayer(l_hidden1_dropout, num_units=self.n_hidden, nonlinearity=rectify)
    l_hidden2_dropout = DropoutLayer(l_hidden2, p=self.dropout)
    l_hidden3 = DenseLayer(l_hidden2_dropout, num_units=self.n_hidden, nonlinearity=rectify)
    l_hidden3_dropout = DropoutLayer(l_hidden3, p=self.dropout)
    l_out = DenseLayer(l_hidden3_dropout, num_units=self.n_classes_, nonlinearity=softmax)
    return l_out
Example 15: build_model

# Required import: from lasagne import nonlinearities [as alias]
# Or: from lasagne.nonlinearities import softmax [as alias]
def build_model(self, input_dim):
    # Hourglass variant: hidden widths go n_hidden/2 -> n_hidden -> n_hidden/2.
    l_in = InputLayer(shape=(self.batch_size, input_dim))
    l_hidden1 = DenseLayer(l_in, num_units=self.n_hidden // 2, nonlinearity=rectify)
    l_hidden1_dropout = DropoutLayer(l_hidden1, p=self.dropout)
    l_hidden2 = DenseLayer(l_hidden1_dropout, num_units=self.n_hidden, nonlinearity=rectify)
    l_hidden2_dropout = DropoutLayer(l_hidden2, p=self.dropout)
    l_hidden3 = DenseLayer(l_hidden2_dropout, num_units=self.n_hidden // 2, nonlinearity=rectify)
    l_hidden3_dropout = DropoutLayer(l_hidden3, p=self.dropout)
    l_out = DenseLayer(l_hidden3_dropout, num_units=self.n_classes_, nonlinearity=softmax)
    return l_out
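To close, a hedged sketch of compiling any of these build_model variants into Theano training and prediction functions; the variable names, feature count, and optimizer are assumptions, not from the original repositories:

import theano
import theano.tensor as T
import lasagne

x = T.matrix('x')
y = T.ivector('y')                        # integer class labels
l_out = model.build_model(input_dim=20)   # `model` = an instance of the class above (hypothetical)
train_out = lasagne.layers.get_output(l_out, x)                      # dropout active
test_out = lasagne.layers.get_output(l_out, x, deterministic=True)   # dropout disabled at test time
loss = lasagne.objectives.categorical_crossentropy(train_out, y).mean()
params = lasagne.layers.get_all_params(l_out, trainable=True)
updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate=0.01, momentum=0.9)
train_fn = theano.function([x, y], loss, updates=updates)
predict_fn = theano.function([x], T.argmax(test_out, axis=1))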