This article collects typical usage examples of blocks.bricks.recurrent.LSTM.weights_init in Python. If you are wondering what LSTM.weights_init is for or how to set it, the curated example below may help. You can also read further about the class it belongs to, blocks.bricks.recurrent.LSTM.
One code example of setting LSTM.weights_init is shown below.
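Before the full example, here is a minimal, self-contained sketch of the basic pattern: assign an initialization scheme to the LSTM brick's weights_init attribute before calling initialize(). The dimension and standard deviation below are arbitrary placeholders, not values taken from the example.

from blocks.bricks.recurrent import LSTM
from blocks.initialization import IsotropicGaussian

# Build the brick, configure how its weight matrices are initialized,
# then let initialize() allocate the parameters and sample their values.
lstm = LSTM(dim=128)                             # placeholder dimension
lstm.weights_init = IsotropicGaussian(std=0.01)  # placeholder scheme/std
lstm.initialize()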
Example 1: main
# Required import: from blocks.bricks.recurrent import LSTM
# Attribute demonstrated below: blocks.bricks.recurrent.LSTM.weights_init
# Note: the other names used in this example (tensor, MLP, Tanh, Constant, Orthogonal,
# IsotropicGaussian, Model, ComputationGraph, VariableFilter, PARAMETER, GradientDescent,
# RMSProp, MainLoop, the monitoring/serialization extensions, BinarizedMNIST, DataStream,
# SequentialScheme, ForceFloatX) come from theano, blocks, and fuel; DRAW is a
# project-specific brick that is not part of blocks itself.
def main(nvis, nhid, encoding_lstm_dim, decoding_lstm_dim, T=1):
    x = tensor.matrix('features')

    # Construct and initialize model
    encoding_mlp = MLP([Tanh()], [None, None])
    decoding_mlp = MLP([Tanh()], [None, None])
    encoding_lstm = LSTM(dim=encoding_lstm_dim)
    decoding_lstm = LSTM(dim=decoding_lstm_dim)
    draw = DRAW(nvis=nvis, nhid=nhid, T=T, encoding_mlp=encoding_mlp,
                decoding_mlp=decoding_mlp, encoding_lstm=encoding_lstm,
                decoding_lstm=decoding_lstm, biases_init=Constant(0),
                weights_init=Orthogonal())
    # Push the parent's initialization config to the children, then override
    # weights_init on the two LSTM bricks before the parameters are initialized.
    draw.push_initialization_config()
    encoding_lstm.weights_init = IsotropicGaussian(std=0.001)
    decoding_lstm.weights_init = IsotropicGaussian(std=0.001)
    draw.initialize()

    # Compute cost
    cost = -draw.log_likelihood_lower_bound(x).mean()
    cost.name = 'nll_upper_bound'
    model = Model(cost)

    # Datasets and data streams
    mnist_train = BinarizedMNIST('train')
    train_loop_stream = ForceFloatX(DataStream(
        dataset=mnist_train,
        iteration_scheme=SequentialScheme(mnist_train.num_examples, 100)))
    train_monitor_stream = ForceFloatX(DataStream(
        dataset=mnist_train,
        iteration_scheme=SequentialScheme(mnist_train.num_examples, 500)))
    mnist_valid = BinarizedMNIST('valid')
    valid_monitor_stream = ForceFloatX(DataStream(
        dataset=mnist_valid,
        iteration_scheme=SequentialScheme(mnist_valid.num_examples, 500)))
    mnist_test = BinarizedMNIST('test')
    test_monitor_stream = ForceFloatX(DataStream(
        dataset=mnist_test,
        iteration_scheme=SequentialScheme(mnist_test.num_examples, 500)))

    # Get parameters and monitoring channels
    computation_graph = ComputationGraph([cost])
    params = VariableFilter(roles=[PARAMETER])(computation_graph.variables)
    monitoring_channels = dict([
        ('avg_' + channel.tag.name, channel.mean()) for channel in
        VariableFilter(name='.*term$')(computation_graph.auxiliary_variables)])
    for name, channel in monitoring_channels.items():
        channel.name = name
    monitored_quantities = list(monitoring_channels.values()) + [cost]

    # Training loop
    step_rule = RMSProp(learning_rate=1e-3, decay_rate=0.95)
    algorithm = GradientDescent(cost=cost, params=params, step_rule=step_rule)
    algorithm.add_updates(computation_graph.updates)
    main_loop = MainLoop(
        model=model, data_stream=train_loop_stream, algorithm=algorithm,
        extensions=[
            Timing(),
            SerializeMainLoop('vae.pkl', save_separately=['model']),
            FinishAfter(after_n_epochs=200),
            DataStreamMonitoring(
                monitored_quantities, train_monitor_stream, prefix="train",
                updates=computation_graph.updates),
            DataStreamMonitoring(
                monitored_quantities, valid_monitor_stream, prefix="valid",
                updates=computation_graph.updates),
            DataStreamMonitoring(
                monitored_quantities, test_monitor_stream, prefix="test",
                updates=computation_graph.updates),
            ProgressBar(),
            Printing()])
    main_loop.run()
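A note on the initialization pattern above (my reading of the Blocks brick lifecycle, not text from the original example): DRAW is constructed with weights_init=Orthogonal() and biases_init=Constant(0), and push_initialization_config() propagates that configuration to all child bricks. The two assignments that follow override weights_init on just the encoding and decoding LSTMs; because the configuration has already been pushed, draw.initialize() does not push it again, so the IsotropicGaussian(std=0.001) schemes are the ones actually used when the LSTM weight matrices are sampled.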