This article collects typical usage examples of the NeuralNet.get_output method from the Python package nolearn.lasagne. If you are unsure what NeuralNet.get_output does, how to call it, or what real usage looks like, the curated example below may help. You can also look further into the class it belongs to, nolearn.lasagne.NeuralNet, for related examples.
The listing below shows 1 code example of NeuralNet.get_output, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Python code samples.
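Before the full example, here is a minimal, self-contained sketch of the call this page is about: NeuralNet.get_output(layer_name, X) returns the activations of a named layer for the given input array, matching how it is used in the example below. The tiny network, its layer names, and the dictionary-style layers specification are illustrative assumptions rather than part of the original example, and the exact layers syntax can vary between nolearn versions.

# Minimal, hypothetical sketch of NeuralNet.get_output usage.
# The layer list and the layer name 'encoder' are assumptions for illustration.
import numpy as np
from lasagne.layers import InputLayer, DenseLayer
from lasagne.nonlinearities import sigmoid
from lasagne.updates import adadelta
from nolearn.lasagne import NeuralNet

net = NeuralNet(
    layers=[
        (InputLayer, {'shape': (None, 100)}),
        (DenseLayer, {'name': 'encoder', 'num_units': 32, 'nonlinearity': sigmoid}),
        (DenseLayer, {'name': 'output', 'num_units': 100, 'nonlinearity': sigmoid}),
    ],
    update=adadelta,
    regression=True,
    max_epochs=5,
    verbose=0,
)

X = np.random.rand(64, 100).astype(np.float32)
net.fit(X, X)                           # train as a tiny autoencoder
encoded = net.get_output('encoder', X)  # activations of the 'encoder' layer
print(encoded.shape)                    # -> (64, 32)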
Example 1: main
# Required import: from nolearn.lasagne import NeuralNet [as alias]
# Or: from nolearn.lasagne.NeuralNet import get_output [as alias]
import numpy as np

from lasagne.objectives import squared_error
from lasagne.updates import adadelta
from nolearn.lasagne import NeuralNet

# load_av_letters, create_split_index, resize_images, normalize_input and
# build_encoder_layers are helper functions from the original project and
# are not shown in this excerpt.
def main():
    data = load_av_letters('data/allData_mouthROIs.mat')
    # create the necessary variable mappings
    data_matrix = data['dataMatrix']
    data_matrix_len = data_matrix.shape[0]
    targets_vec = data['targetsVec']
    vid_len_vec = data['videoLengthVec']
    iter_vec = data['iterVec']
    indexes = create_split_index(data_matrix_len, vid_len_vec, iter_vec)
    # split the data
    train_data = data_matrix[indexes == True]
    train_targets = targets_vec[indexes == True]
    test_data = data_matrix[indexes == False]
    test_targets = targets_vec[indexes == False]
    idx = [i for i, elem in enumerate(test_targets) if elem == 20]
    print(train_data.shape)
    print(test_data.shape)
    print(sum([train_data.shape[0], test_data.shape[0]]))
    # resize the input data to 40 x 30
    train_data_resized = resize_images(train_data).astype(np.float32)
    # normalize the inputs [0 - 1]
    train_data_resized = normalize_input(train_data_resized, centralize=True)
    test_data_resized = resize_images(test_data).astype(np.float32)
    test_data_resized = normalize_input(test_data_resized, centralize=True)
    dic = {}
    dic['trainDataResized'] = train_data_resized
    dic['testDataResized'] = test_data_resized
"""second experiment: overcomplete sigmoid encoder/decoder, squared loss"""
encode_size = 2500
sigma = 0.5
# to get tied weights in the encoder/decoder, create this shared weightMatrix
# 1200 x 2000
w1, layer1 = build_encoder_layers(1200, 2500, sigma)
ae1 = NeuralNet(
layers=layer1,
max_epochs=50,
objective_loss_function=squared_error,
update=adadelta,
regression=True,
verbose=1
)
load = True
save = False
if load:
print('[LOAD] layer 1...')
ae1.load_params_from('layer1.dat')
else:
print('[TRAIN] layer 1...')
ae1.fit(train_data_resized, train_data_resized)
# save params
if save:
print('[SAVE] layer 1...')
ae1.save_params_to('layer1.dat')
    train_encoded1 = ae1.get_output('encoder', train_data_resized)  # 12293 x 2500
    # second autoencoder, trained on the first encoder's activations (2500 -> 1250)
    w2, layer2 = build_encoder_layers(2500, 1250)
    ae2 = NeuralNet(
        layers=layer2,
        max_epochs=50,
        objective_loss_function=squared_error,
        update=adadelta,
        regression=True,
        verbose=1
    )
    load2 = True
    if load2:
        print('[LOAD] layer 2...')
        ae2.load_params_from('layer2.dat')
    else:
        print('[TRAIN] layer 2...')
        ae2.fit(train_encoded1, train_encoded1)
    save2 = False
    if save2:
        print('[SAVE] layer 2...')
        ae2.save_params_to('layer2.dat')
    train_encoded2 = ae2.get_output('encoder', train_encoded1)  # 12293 x 1250
    # third autoencoder (1250 -> 600)
    w3, layer3 = build_encoder_layers(1250, 600)
    ae3 = NeuralNet(
        layers=layer3,
        max_epochs=100,
        objective_loss_function=squared_error,
        # ......... the rest of this example is omitted .........
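
The build_encoder_layers helper used above is not part of this excerpt. Judging only from how it is called (it takes an input size, an encoding size and an optional noise sigma, and returns a shared weight matrix together with a layer specification), a hypothetical sketch with tied encoder/decoder weights could look like the following. Every name and detail here is an assumption for illustration, not the author's actual implementation.

# Hypothetical sketch of a build_encoder_layers helper with tied weights.
# Everything below is an assumption inferred from the calls in the example.
import theano
import lasagne
from lasagne.layers import InputLayer, DenseLayer, GaussianNoiseLayer
from lasagne.nonlinearities import sigmoid

def build_encoder_layers(n_in, n_encode, sigma=0.0):
    # One shared weight matrix; the decoder reuses its transpose (tied weights).
    w = theano.shared(
        lasagne.init.GlorotUniform()((n_in, n_encode)).astype(theano.config.floatX),
        name='w_tied')
    layers = [
        (InputLayer, {'shape': (None, n_in)}),
        (GaussianNoiseLayer, {'name': 'corrupt', 'sigma': sigma}),
        (DenseLayer, {'name': 'encoder', 'num_units': n_encode,
                      'W': w, 'nonlinearity': sigmoid}),
        # Passing the Theano expression w.T as W ties the decoder to the encoder.
        (DenseLayer, {'name': 'decoder', 'num_units': n_in,
                      'W': w.T, 'nonlinearity': sigmoid}),
    ]
    return w, layers

Tying the decoder to w.T halves the number of weights, which is a common regularizer for autoencoders; Lasagne accepts a Theano expression such as w.T wherever a layer parameter is expected, which is what makes this trick possible.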