This page collects typical usage examples of the Python method nolearn.lasagne.NeuralNet.get_params. If you have been wondering exactly how NeuralNet.get_params is used, the curated code examples below should help; see also the documentation for the containing class, nolearn.lasagne.NeuralNet.
The following shows 4 code examples of NeuralNet.get_params, sorted by popularity by default.
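A quick orientation before the examples: NeuralNet follows the scikit-learn estimator protocol, so get_params() returns a dict of the keyword arguments the network was constructed with. A minimal sketch of this behavior (the layer names and sizes are illustrative; the keyword style matches the nolearn-era examples below):

from nolearn.lasagne import NeuralNet
from lasagne.layers import InputLayer, DenseLayer
from lasagne.nonlinearities import softmax
from lasagne.updates import nesterov_momentum

net = NeuralNet(
    layers=[('input', InputLayer), ('output', DenseLayer)],
    input_shape=(None, 784),
    output_num_units=10,
    output_nonlinearity=softmax,
    update=nesterov_momentum,
    update_learning_rate=0.01,
    update_momentum=0.9,
    max_epochs=10,
)
params = net.get_params()               # dict of constructor arguments
print(params['update_learning_rate'])   # -> 0.01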
Example 1: test_clone
# Required import: from nolearn.lasagne import NeuralNet [as alias]
# Or: from nolearn.lasagne.NeuralNet import get_params [as alias]
def test_clone():
    from nolearn.lasagne import NeuralNet
    from nolearn.lasagne import BatchIterator
    from nolearn.lasagne import objective
    # Module-level imports from the original test file (not shown on this page):
    from lasagne.layers import InputLayer, DenseLayer
    from lasagne.nonlinearities import softmax
    from lasagne.updates import nesterov_momentum
    from lasagne.objectives import categorical_crossentropy
    from sklearn.base import clone
    import theano.tensor as T

    params = dict(
        layers=[
            ('input', InputLayer),
            ('hidden', DenseLayer),
            ('output', DenseLayer),
        ],
        input_shape=(100, 784),
        output_num_units=10,
        output_nonlinearity=softmax,
        more_params={
            'hidden_num_units': 100,
        },
        update=nesterov_momentum,
        update_learning_rate=0.01,
        update_momentum=0.9,
        regression=False,
        objective=objective,
        objective_loss_function=categorical_crossentropy,
        batch_iterator_train=BatchIterator(batch_size=100),
        y_tensor_type=T.ivector,
        use_label_encoder=False,
        on_epoch_finished=None,
        on_training_finished=None,
        max_epochs=100,
        eval_size=0.1,  # BBB
        check_input=True,
        verbose=0,
    )
    nn = NeuralNet(**params)
    nn2 = clone(nn)
    params1 = nn.get_params()
    params2 = nn2.get_params()
    for ignore in (
        'batch_iterator_train',
        'batch_iterator_test',
        'output_nonlinearity',
        'loss',
        'objective',
        'train_split',
        'eval_size',
        'X_tensor_type',
        'on_epoch_finished',
        'on_batch_finished',
        'on_training_started',
        'on_training_finished',
        'custom_scores',
    ):
        for par in (params, params1, params2):
            par.pop(ignore, None)
    assert params == params1 == params2
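What this test exercises: sklearn.base.clone reads an estimator's get_params() and feeds the result back into the class constructor, so get_params() must round-trip every constructor argument. A condensed sketch of the same idea (nn as built in the test above; the equivalence comment is approximate, since clone also clones nested estimators):

from sklearn.base import clone

nn2 = clone(nn)   # roughly NeuralNet(**nn.get_params())
assert nn2.get_params()['max_epochs'] == nn.get_params()['max_epochs']
assert nn2 is not nn   # a fresh, unfitted copy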
Example 2: test_clone
# Required import: from nolearn.lasagne import NeuralNet [as alias]
# Or: from nolearn.lasagne.NeuralNet import get_params [as alias]
def test_clone():
    from nolearn.lasagne import NeuralNet
    from nolearn.lasagne import BatchIterator
    from nolearn.lasagne import objective
    # Module-level imports from the original test file (not shown on this page):
    from lasagne.layers import InputLayer, DenseLayer
    from lasagne.nonlinearities import softmax
    from lasagne.updates import nesterov_momentum
    from lasagne.objectives import categorical_crossentropy
    from sklearn.base import clone
    import theano.tensor as T

    params = dict(
        layers=[("input", InputLayer), ("hidden", DenseLayer), ("output", DenseLayer)],
        input_shape=(100, 784),
        output_num_units=10,
        output_nonlinearity=softmax,
        more_params={"hidden_num_units": 100},
        update=nesterov_momentum,
        update_learning_rate=0.01,
        update_momentum=0.9,
        regression=False,
        objective=objective,
        objective_loss_function=categorical_crossentropy,
        batch_iterator_train=BatchIterator(batch_size=100),
        y_tensor_type=T.ivector,
        use_label_encoder=False,
        on_epoch_finished=None,
        on_training_finished=None,
        max_epochs=100,
        eval_size=0.1,  # BBB
        verbose=0,
    )
    nn = NeuralNet(**params)
    nn2 = clone(nn)
    params1 = nn.get_params()
    params2 = nn2.get_params()
    for ignore in (
        "batch_iterator_train",
        "batch_iterator_test",
        "output_nonlinearity",
        "loss",
        "objective",
        "train_split",
        "eval_size",
        "X_tensor_type",
        "on_epoch_finished",
        "on_batch_finished",
        "on_training_started",
        "on_training_finished",
        "custom_score",
    ):
        for par in (params, params1, params2):
            par.pop(ignore, None)
    assert params == params1 == params2
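Examples 1 and 2 appear to be the same test taken from different nolearn revisions: the newer variant (example 1) also passes check_input=True and ignores custom_scores (plural), while this one ignores custom_score. The keys popped before the final comparison are arguments that NeuralNet rewrites or deprecates at construction time (the # BBB markers, presumably "backwards compatibility", flag eval_size as such), so they cannot be expected to compare equal after a round-trip.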
Example 3: test_clone
# Required import: from nolearn.lasagne import NeuralNet [as alias]
# Or: from nolearn.lasagne.NeuralNet import get_params [as alias]
def test_clone():
    from nolearn.lasagne import NeuralNet
    from nolearn.lasagne import negative_log_likelihood
    from nolearn.lasagne import BatchIterator
    # Module-level imports from the original test file (not shown on this page):
    from lasagne.layers import InputLayer, DenseLayer
    from lasagne.nonlinearities import softmax
    from lasagne.updates import nesterov_momentum
    from sklearn.base import clone
    import theano.tensor as T

    params = dict(
        layers=[
            ('input', InputLayer),
            ('hidden', DenseLayer),
            ('output', DenseLayer),
        ],
        input_shape=(100, 784),
        output_num_units=10,
        output_nonlinearity=softmax,
        more_params={
            'hidden_num_units': 100,
        },
        update=nesterov_momentum,
        update_learning_rate=0.01,
        update_momentum=0.9,
        regression=False,
        loss=negative_log_likelihood,
        batch_iterator_train=BatchIterator(batch_size=100),
        X_tensor_type=T.matrix,
        y_tensor_type=T.ivector,
        use_label_encoder=False,
        on_epoch_finished=None,
        on_training_finished=None,
        max_epochs=100,
        eval_size=0.1,
        verbose=0,
    )
    nn = NeuralNet(**params)
    nn2 = clone(nn)
    params1 = nn.get_params()
    params2 = nn2.get_params()
    for ignore in (
        'batch_iterator_train',
        'batch_iterator_test',
        'output_nonlinearity',
    ):
        for par in (params, params1, params2):
            par.pop(ignore, None)
    assert params == params1 == params2
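The reason NeuralNet implements get_params at all is scikit-learn interoperability: any object with a faithful get_params/set_params pair can be dropped into the standard model-selection utilities. A hypothetical sketch, assuming a net built as in the examples above (the parameter names match the constructor keywords; GridSearchCV's module path varies across scikit-learn versions):

from sklearn.model_selection import GridSearchCV

search = GridSearchCV(
    nn,  # a NeuralNet instance, e.g. from example 3
    param_grid={
        'update_learning_rate': [0.01, 0.001],
        'max_epochs': [50, 100],
    },
    cv=3,
)
# search.fit(X, y) would clone the net once per parameter combination,
# calling set_params() with values from param_grid each time.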
Example 4: __init__
# Required import: from nolearn.lasagne import NeuralNet [as alias]
# Or: from nolearn.lasagne.NeuralNet import get_params [as alias]
# ......... part of the code is omitted here: the class definition, __init__, and
# the opening of the make_cnn(self, X, y) method that the code below belongs to
# (inferred from self.make_cnn(X, y) in fit further down) .........
        def geom_mean(x, axis=None):  # def line falls in the omitted code; signature reconstructed from context
            # Theano variant, kept from the original experiments:
            # x = theano.tensor.as_tensor_variable(x)
            # log = theano.tensor.log(x)
            # m = theano.tensor.mean(log, axis=axis)
            # g = theano.tensor.exp(m)
            # ... or in one line:
            # g = theano.tensor.exp(theano.tensor.mean(theano.tensor.log(x), axis=axis))
            log = np.log(x)
            m = log.mean(axis=axis)
            g = np.exp(m)
            print("gmean", g)  # was: print "gmean", g.type, g -- numpy arrays have no .type
            return g

        def l2_norm(x, axis=None):
            x = theano.tensor.as_tensor_variable(x)
            l = x.norm(2, axis=axis)  # restored: the original only computed a sum, leaving `l` undefined
            print("norm", l.type, l)
            return l

        def me(x, axis=None):
            x = theano.tensor.as_tensor_variable(x)
            m = theano.tensor.mean(x, axis=axis)
            print("mean", m.type, m)
            return m

        # print(type(theano.tensor.mean), type(geom_mean), type(l2_norm))
        # Learning rates tried earlier: 0.0001, 0.0005, 0.001
        learning_rate = .00001
        # if 'pat' in self.subject:
        #     learning_rate = 0.0001
        # FSIZE1 = (1, 2)
        # FSIZE2 = (1, X.shape[3])
        # NUM_FILTERS*, FSIZE*, float32 and EarlyStopping are defined in the omitted code.
        convnet = NeuralNet(
            layers=[
                (InputLayer, {'shape': (None, 1, X.shape[2], X.shape[3])}),
                (Conv2DLayer, {'num_filters': NUM_FILTERS1, 'filter_size': FSIZE1}),
                (DropoutLayer, {'p': .75}),
                (ReshapeLayer, {'shape': ([0], [2], [1], [3])}),
                (Conv2DLayer, {'name': 'conv2', 'num_filters': NUM_FILTERS2, 'filter_size': FSIZE2}),
                # (DropoutLayer, {'p': .85}),
                # (ReshapeLayer, {'shape': ([0], [2], [1], [3])}),
                # (Conv2DLayer, {'name': 'conv3', 'num_filters': NUM_FILTERS3, 'filter_size': FSIZE3}),
                (GlobalPoolLayer, {'name': 'g1', 'incoming': 'conv2', 'pool_function': me}),
                (GlobalPoolLayer, {'name': 'g2', 'incoming': 'conv2', 'pool_function': theano.tensor.max}),
                (GlobalPoolLayer, {'name': 'g3', 'incoming': 'conv2', 'pool_function': theano.tensor.min}),
                (GlobalPoolLayer, {'name': 'g4', 'incoming': 'conv2', 'pool_function': theano.tensor.var}),
                # (GlobalPoolLayer, {'name': 'g5', 'incoming': 'conv2', 'pool_function': geom_mean}),
                # (GlobalPoolLayer, {'name': 'g6', 'incoming': 'conv2', 'pool_function': l2_norm}),
                (ConcatLayer, {'incomings': ['g1', 'g2', 'g3', 'g4']}),
                (DenseLayer, {'num_units': 256}),
                (DropoutLayer, {'p': .5}),
                (DenseLayer, {'num_units': 256}),
                (DenseLayer, {'num_units': 2, 'nonlinearity': softmax}),
            ],
            update_learning_rate=theano.shared(float32(learning_rate)),
            update_momentum=theano.shared(float32(0.9)),
            verbose=1,
            max_epochs=100000,
            on_epoch_finished=[
                EarlyStopping(patience=100),
            ],
        )
        return convnet
    def fit(self, X, y, xt, yt):
        X, y, xt, yt = formatData(X, y=y, Xt=xt, yt=yt)  # formatData comes from the omitted code
        self.convnet = self.make_cnn(X, y)
        print("shape", X.shape)
        self.convnet.fit(X, y, xt, yt)

    def predict_proba(self, X):
        X, _, _, _ = formatData(X)
        return self.convnet.predict_proba(X)

    def predict(self, X):
        X, _, _, _ = formatData(X)
        return self.convnet.predict(X)

    def get_params(self, deep=True):  # sklearn expects a `deep` keyword with a default
        return self.convnet.get_params()

    def load_params_from(self, net):
        return self.convnet.load_params_from(net)
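One caveat on the wrapper's get_params above: delegating to self.convnet.get_params() reports the inner NeuralNet's constructor arguments rather than the wrapper's own. scikit-learn's contract is that get_params mirrors the estimator's own __init__ signature (sklearn.base.clone re-instantiates the class with exactly those keys), so this delegation trades strict compatibility for convenience. A stricter variant, assuming hypothetically that the wrapper's __init__ takes only a subject argument:

def get_params(self, deep=True):
    # Report only the wrapper's own constructor arguments,
    # as sklearn.base.clone expects.
    return {'subject': self.subject}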