This article collects typical usage examples of the NeuralNet.y_train attribute from the Python package nolearn.lasagne. If you have been wondering what NeuralNet.y_train is for or how to use it, the selected example below may help. You can also browse further usage examples of the containing class, nolearn.lasagne.NeuralNet.
One code example of NeuralNet.y_train is shown below.
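Note that y_train is an instance attribute rather than a regular method: in the example below, NumPy arrays are assigned to net.X_train / net.y_train / net.X_valid / net.y_valid after the network is constructed, in order to pin a specific training/validation split. The short sketch below illustrates that pattern on synthetic data; the data, layer sizes, and the 80/20 split are placeholders, and whether fit() actually honours the assigned attributes depends on the customized nolearn build used in the example (stock nolearn releases manage the split themselves via eval_size/train_split).

# A minimal sketch with synthetic data; sizes and split are placeholders.
import numpy as np
from lasagne import layers, nonlinearities
from nolearn.lasagne import NeuralNet

X = np.random.rand(100, 20).astype(np.float32)          # 100 samples, 20 features
y = np.random.randint(0, 5, size=100).astype(np.int32)  # 5 classes

net = NeuralNet(
    layers=[
        ('input', layers.InputLayer),
        ('hidden', layers.DenseLayer),
        ('output', layers.DenseLayer),
    ],
    input_shape=(None, 20),
    hidden_num_units=32,
    output_num_units=5,
    output_nonlinearity=nonlinearities.softmax,
    update_learning_rate=0.01,
    max_epochs=5,
    verbose=0,
)

# Pin a fixed split by assigning the attributes directly (the pattern shown in
# Example 1); on a stock nolearn release this assignment is ignored and the
# library builds its own split when fit() is called.
net.X_train, net.y_train = X[:80], y[:80]
net.X_valid, net.y_valid = X[80:], y[80:]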
Example 1: define_net
# Required import: from nolearn.lasagne import NeuralNet [as alias]
# Or: from nolearn.lasagne.NeuralNet import y_train [as alias]
#......... part of the code is omitted here .........
    test_iterator = ParallelBatchIterator(keys, params.BATCH_SIZE, std, mean, y_all=y)

    if params.REGRESSION:
        y = util.float32(y)
        y = y[:, np.newaxis]

    if 'gpu' in theano.config.device:
        # Half of coma does not support cuDNN, check whether we can use it on this node
        # If not, use cuda_convnet bindings
        from theano.sandbox.cuda.dnn import dnn_available
        if dnn_available():
            from lasagne.layers import dnn
            Conv2DLayer = dnn.Conv2DDNNLayer
            MaxPool2DLayer = dnn.MaxPool2DDNNLayer
        else:
            from lasagne.layers import cuda_convnet
            Conv2DLayer = cuda_convnet.Conv2DCCLayer
            MaxPool2DLayer = cuda_convnet.MaxPool2DCCLayer
    else:
        Conv2DLayer = layers.Conv2DLayer
        MaxPool2DLayer = layers.MaxPool2DLayer

    Maxout = layers.pool.FeaturePoolLayer

    net = NeuralNet(
        layers=[
            ('input', layers.InputLayer),
            ('conv0', Conv2DLayer),
            ('pool0', MaxPool2DLayer),
            ('conv1', Conv2DLayer),
            ('pool1', MaxPool2DLayer),
            ('conv2', Conv2DLayer),
            ('pool2', MaxPool2DLayer),
            ('conv3', Conv2DLayer),
            ('pool3', MaxPool2DLayer),
            ('conv4', Conv2DLayer),
            ('pool4', MaxPool2DLayer),
            ('dropouthidden1', layers.DropoutLayer),
            ('hidden1', layers.DenseLayer),
            ('maxout1', Maxout),
            ('dropouthidden2', layers.DropoutLayer),
            ('hidden2', layers.DenseLayer),
            ('maxout2', Maxout),
            ('dropouthidden3', layers.DropoutLayer),
            ('output', layers.DenseLayer),
        ],

        input_shape=(None, params.CHANNELS, params.PIXELS, params.PIXELS),

        conv0_num_filters=32, conv0_filter_size=(5, 5), conv0_stride=(2, 2), pool0_pool_size=(2, 2), pool0_stride=(2, 2),
        conv1_num_filters=64, conv1_filter_size=(5, 5), conv1_border_mode='same', pool1_pool_size=(2, 2), pool1_stride=(2, 2),
        conv2_num_filters=128, conv2_filter_size=(3, 3), conv2_border_mode='same', pool2_pool_size=(2, 2), pool2_stride=(2, 2),
        conv3_num_filters=192, conv3_filter_size=(3, 3), conv3_border_mode='same', pool3_pool_size=(2, 2), pool3_stride=(2, 2),
        conv4_num_filters=256, conv4_filter_size=(3, 3), conv4_border_mode='same', pool4_pool_size=(2, 2), pool4_stride=(2, 2),

        hidden1_num_units=1024,
        hidden2_num_units=1024,

        dropouthidden1_p=0.5,
        dropouthidden2_p=0.5,
        dropouthidden3_p=0.5,

        maxout1_pool_size=2,
        maxout2_pool_size=2,

        output_num_units=1 if params.REGRESSION else 5,
        output_nonlinearity=None if params.REGRESSION else nonlinearities.softmax,

        update_learning_rate=theano.shared(util.float32(params.START_LEARNING_RATE)),
        update_momentum=theano.shared(util.float32(params.MOMENTUM)),

        custom_score=('kappa', quadratic_kappa),
        regression=params.REGRESSION,

        batch_iterator_train=train_iterator,
        batch_iterator_test=test_iterator,

        on_epoch_finished=[
            AdjustVariable('update_learning_rate', start=params.START_LEARNING_RATE),
            stats.Stat(),
            ModelSaver()
        ],

        max_epochs=500,
        verbose=1,

        # Only relevant when create_validation_split = True
        eval_size=0.1,
        # Need to specify the splits manually, as indicated below!
        create_validation_split=params.SUBSET > 0,
    )

    # It is recommended to use the same training/validation split across models,
    # both for ensembling and for threshold optimization.
    #
    # To set a specific training/validation split:
    net.X_train = np.load(params.IMAGE_SOURCE + "/X_train.npy")
    net.X_valid = np.load(params.IMAGE_SOURCE + "/X_valid.npy")
    net.y_train = np.load(params.IMAGE_SOURCE + "/y_train.npy")
    net.y_valid = np.load(params.IMAGE_SOURCE + "/y_valid.npy")

    return net, X, y
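As a follow-up, a hedged sketch of how the returned triple might be consumed. The call signature of define_net is omitted in the excerpt above, so it is not repeated here; the names net, X, y are assumed to be exactly what the function returns.

# Hypothetical driver code, assuming net, X, y were obtained from define_net above.
net.fit(X, y)                 # train; the configured epoch hooks (AdjustVariable, Stat, ModelSaver) fire each epoch
predictions = net.predict(X)  # scikit-learn-style inference through the configured batch iterator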