本文整理汇总了Python中cle.cle.train.Training类的典型用法代码示例。如果您正苦于以下问题:Python Training类的具体用法?Python Training怎么用?Python Training使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了Training类的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: Net
# Example 1: assemble a recurrent Net, reduce the per-step cost to a scalar
# objective, and launch a cle Training main loop.
# NOTE(review): fragment of a larger script — h1..h4, cost, inputs,
# inputs_dim, model, batch_size, trdata and save_path are defined in code
# omitted from this excerpt.
nodes = [h1, h2, h3, h4, cost]
rnn = Net(inputs=inputs, inputs_dim=inputs_dim, nodes=nodes)
# build_recurrent_graph returns a container; unpack extracts the single
# cost graph requested via output_args.
cost = unpack(rnn.build_recurrent_graph(output_args=[cost]))
cost = cost.mean()  # collapse to a scalar training objective
cost.name = 'cost'
model.graphs = [rnn]
optimizer = Adam(
lr=0.001
)
# Main-loop extensions: clip gradients, stop after 100 epochs, report the
# cost every 100 iterations, and pickle the model every 200 iterations.
extension = [
GradientClipping(batch_size=batch_size),
EpochCount(100),
Monitoring(freq=100,
ddout=[cost]),
Picklize(freq=200, path=save_path)
]
mainloop = Training(
name='toy_bb_gflstm',
data=Iterator(trdata, batch_size),
model=model,
optimizer=optimizer,
cost=cost,
outputs=[cost],
extension=extension
)
mainloop.run()
示例2: main
#.........这里部分代码省略.........
# Example 2: build a recurrent bivariate-Gaussian density network with
# theano.scan, define a masked NLL objective plus monitoring statistics,
# and run the cle Training main loop.
# NOTE(review): fragment of a larger main() — nodes, rnn, x, x_1, theta_1,
# theta_mu, theta_sig, corr, binary, mask, batch_size, lr, epoch,
# monitoring_freq, save_path, valid_data, train_data, pkl_name and model
# are defined in code omitted from this excerpt.

# Collect and initialize all learnable parameters exposed by the nodes.
params = OrderedDict()
for node in nodes:
    if node.initialize() is not None:
        params.update(node.initialize())
params = init_tparams(params)

s_0 = rnn.get_init_state(batch_size)
x_1_temp = x_1.fprop([x], params)

def inner_fn(x_t, s_tm1):
    """One recurrence step: next hidden state from input x_t and state s_tm1."""
    s_t = rnn.fprop([[x_t], [s_tm1]], params)
    return s_t

((s_temp), updates) = theano.scan(fn=inner_fn,
                                  sequences=[x_1_temp],
                                  outputs_info=[s_0])
# Register scan's shared-variable updates as default updates so they run
# whenever the graph is evaluated. Use items() instead of the
# Python-2-only iteritems() so the code also runs on Python 3.
for k, v in updates.items():
    k.default_update = v

# Shift states one step right so the output at time t conditions on s_{t-1}.
s_temp = concatenate([s_0[None, :, :], s_temp[:-1]], axis=0)
theta_1_temp = theta_1.fprop([s_temp], params)
theta_mu_temp = theta_mu.fprop([theta_1_temp], params)
theta_sig_temp = theta_sig.fprop([theta_1_temp], params)
corr_temp = corr.fprop([theta_1_temp], params)
binary_temp = binary.fprop([theta_1_temp], params)

# Flatten (time, batch, dim) to (time*batch, dim) for the density terms.
x_shape = x.shape
x_in = x.reshape((x_shape[0]*x_shape[1], -1))
theta_mu_in = theta_mu_temp.reshape((x_shape[0]*x_shape[1], -1))
theta_sig_in = theta_sig_temp.reshape((x_shape[0]*x_shape[1], -1))
corr_in = corr_temp.reshape((x_shape[0]*x_shape[1], -1))
binary_in = binary_temp.reshape((x_shape[0]*x_shape[1], -1))

# Bivariate-Gaussian (+ binary) negative log-likelihood, masked to ignore
# padded timesteps, summed over time and averaged over the batch.
recon = BiGauss(x_in, theta_mu_in, theta_sig_in, corr_in, binary_in)
recon = recon.reshape((x_shape[0], x_shape[1]))
recon = recon * mask
recon_term = recon.sum(axis=0).mean()
recon_term.name = 'nll'

# Named scalar statistics of inputs and output-layer activations,
# collected purely for monitoring.
max_x = x.max()
mean_x = x.mean()
min_x = x.min()
max_x.name = 'max_x'
mean_x.name = 'mean_x'
min_x.name = 'min_x'
max_theta_mu = theta_mu_in.max()
mean_theta_mu = theta_mu_in.mean()
min_theta_mu = theta_mu_in.min()
max_theta_mu.name = 'max_theta_mu'
mean_theta_mu.name = 'mean_theta_mu'
min_theta_mu.name = 'min_theta_mu'
max_theta_sig = theta_sig_in.max()
mean_theta_sig = theta_sig_in.mean()
min_theta_sig = theta_sig_in.min()
max_theta_sig.name = 'max_theta_sig'
mean_theta_sig.name = 'mean_theta_sig'
min_theta_sig.name = 'min_theta_sig'

model.inputs = [x, mask]
model._params = params
model.nodes = nodes

optimizer = Adam(
    lr=lr
)

# Extensions: gradient clipping, epoch budget, validation-set monitoring,
# periodic pickling, early stopping and weight normalization.
extension = [
    GradientClipping(batch_size=batch_size),
    EpochCount(epoch),
    Monitoring(freq=monitoring_freq,
               ddout=[recon_term,
                      max_theta_sig, mean_theta_sig, min_theta_sig,
                      max_x, mean_x, min_x,
                      max_theta_mu, mean_theta_mu, min_theta_mu],
               data=[Iterator(valid_data, batch_size)]),
    Picklize(freq=monitoring_freq, path=save_path),
    EarlyStopping(freq=monitoring_freq, path=save_path),
    WeightNorm()
]

mainloop = Training(
    name=pkl_name,
    data=Iterator(train_data, batch_size),
    model=model,
    optimizer=optimizer,
    cost=recon_term,
    outputs=[recon_term],
    extension=extension
)
mainloop.run()
示例3: main
#.........这里部分代码省略.........
# Example 3: variational-RNN-style objective — masked BiGauss reconstruction
# term plus a KL term — with extensive monitoring, ending in Training.run().
# NOTE(review): fragment of a larger main() — theta_sig_temp, corr_temp,
# binary_temp, theta_mu_temp, x, x_shape, x_in, mask, kl_temp, phi_sig_temp,
# prior_sig_temp, model, params, nodes, lr, epoch, monitoring_freq,
# save_path, channel_name, valid_data, train_data, batch_size and pkl_name
# are defined in code omitted from this excerpt.
theta_sig_in = theta_sig_temp.reshape((x_shape[0] * x_shape[1], -1))
corr_in = corr_temp.reshape((x_shape[0] * x_shape[1], -1))
binary_in = binary_temp.reshape((x_shape[0] * x_shape[1], -1))
# Masked reconstruction NLL: sum over time, average over batch.
recon = BiGauss(x_in, theta_mu_in, theta_sig_in, corr_in, binary_in)
recon = recon.reshape((x_shape[0], x_shape[1]))
recon = recon * mask
recon_term = recon.sum(axis=0).mean()
recon_term.name = "recon_term"
# Masked KL term aggregated the same way as the reconstruction term.
kl_temp = kl_temp * mask
kl_term = kl_temp.sum(axis=0).mean()
kl_term.name = "kl_term"
# Variational upper bound on the NLL = reconstruction + KL.
nll_upper_bound = recon_term + kl_term
nll_upper_bound.name = "nll_upper_bound"
# Named scalar statistics for monitoring inputs and sigma/mu activations.
max_x = x.max()
mean_x = x.mean()
min_x = x.min()
max_x.name = "max_x"
mean_x.name = "mean_x"
min_x.name = "min_x"
max_theta_mu = theta_mu_in.max()
mean_theta_mu = theta_mu_in.mean()
min_theta_mu = theta_mu_in.min()
max_theta_mu.name = "max_theta_mu"
mean_theta_mu.name = "mean_theta_mu"
min_theta_mu.name = "min_theta_mu"
max_theta_sig = theta_sig_in.max()
mean_theta_sig = theta_sig_in.mean()
min_theta_sig = theta_sig_in.min()
max_theta_sig.name = "max_theta_sig"
mean_theta_sig.name = "mean_theta_sig"
min_theta_sig.name = "min_theta_sig"
max_phi_sig = phi_sig_temp.max()
mean_phi_sig = phi_sig_temp.mean()
min_phi_sig = phi_sig_temp.min()
max_phi_sig.name = "max_phi_sig"
mean_phi_sig.name = "mean_phi_sig"
min_phi_sig.name = "min_phi_sig"
max_prior_sig = prior_sig_temp.max()
mean_prior_sig = prior_sig_temp.mean()
min_prior_sig = prior_sig_temp.min()
max_prior_sig.name = "max_prior_sig"
mean_prior_sig.name = "mean_prior_sig"
min_prior_sig.name = "min_prior_sig"
model.inputs = [x, mask]
model.params = params
model.nodes = nodes
optimizer = Adam(lr=lr)
# Extensions: gradient clipping, epoch budget, validation monitoring of all
# named statistics, periodic pickling, early stopping on channel_name, and
# weight normalization.
extension = [
GradientClipping(batch_size=batch_size),
EpochCount(epoch),
Monitoring(
freq=monitoring_freq,
ddout=[
nll_upper_bound,
recon_term,
kl_term,
max_phi_sig,
mean_phi_sig,
min_phi_sig,
max_prior_sig,
mean_prior_sig,
min_prior_sig,
max_theta_sig,
mean_theta_sig,
min_theta_sig,
max_x,
mean_x,
min_x,
max_theta_mu,
mean_theta_mu,
min_theta_mu,
],
data=[Iterator(valid_data, batch_size)],
),
Picklize(freq=monitoring_freq, path=save_path),
EarlyStopping(freq=monitoring_freq, path=save_path, channel=channel_name),
WeightNorm(),
]
# Train on nll_upper_bound as the cost; outputs are the reported values.
mainloop = Training(
name=pkl_name,
data=Iterator(train_data, batch_size),
model=model,
optimizer=optimizer,
cost=nll_upper_bound,
outputs=[nll_upper_bound],
extension=extension,
)
mainloop.run()
示例4: main
#.........这里部分代码省略.........
# Example 4: GMM-output VRNN with a separate monitoring graph (m_-prefixed
# variables) and a dedicated theano monitor function; ends in Training.run().
# NOTE(review): fragment of a larger main() — theta_sig, coeff,
# m_theta_4_temp, m_phi_mu_temp, m_phi_sig_temp, m_prior_mu_temp,
# m_prior_sig_temp, m_theta_mu_temp, m_x, x, params, model, nodes,
# shared_updates, batch_size, m_batch_size, lr, epoch, monitoring_freq,
# force_saving_freq, save_path, channel_name, train_data, valid_data,
# pkl_name and the training-graph nll_upper_bound are defined in omitted code.
m_theta_sig_temp = theta_sig.fprop([m_theta_4_temp], params)
m_coeff_temp = coeff.fprop([m_theta_4_temp], params)
# KL between the approximate posterior and the prior (monitoring graph).
m_kl_temp = KLGaussianGaussian(m_phi_mu_temp, m_phi_sig_temp, m_prior_mu_temp, m_prior_sig_temp)
# Flatten (time, batch, dim) to (time*batch, dim) for the GMM density.
m_x_shape = m_x.shape
m_x_in = m_x.reshape((m_x_shape[0]*m_x_shape[1], -1))
m_theta_mu_in = m_theta_mu_temp.reshape((m_x_shape[0]*m_x_shape[1], -1))
m_theta_sig_in = m_theta_sig_temp.reshape((m_x_shape[0]*m_x_shape[1], -1))
m_coeff_in = m_coeff_temp.reshape((m_x_shape[0]*m_x_shape[1], -1))
# Monitoring-graph upper bound = mean GMM NLL + mean KL.
m_recon = GMM(m_x_in, m_theta_mu_in, m_theta_sig_in, m_coeff_in)
m_recon_term = m_recon.mean()
m_kl_term = m_kl_temp.mean()
m_nll_upper_bound = m_recon_term + m_kl_term
m_nll_upper_bound.name = 'nll_upper_bound'
m_recon_term.name = 'recon_term'
m_kl_term.name = 'kl_term'
# Named scalar statistics over the monitoring graph.
max_x = m_x.max()
mean_x = m_x.mean()
min_x = m_x.min()
max_x.name = 'max_x'
mean_x.name = 'mean_x'
min_x.name = 'min_x'
max_theta_mu = m_theta_mu_in.max()
mean_theta_mu = m_theta_mu_in.mean()
min_theta_mu = m_theta_mu_in.min()
max_theta_mu.name = 'max_theta_mu'
mean_theta_mu.name = 'mean_theta_mu'
min_theta_mu.name = 'min_theta_mu'
max_theta_sig = m_theta_sig_in.max()
mean_theta_sig = m_theta_sig_in.mean()
min_theta_sig = m_theta_sig_in.min()
max_theta_sig.name = 'max_theta_sig'
mean_theta_sig.name = 'mean_theta_sig'
min_theta_sig.name = 'min_theta_sig'
max_phi_sig = m_phi_sig_temp.max()
mean_phi_sig = m_phi_sig_temp.mean()
min_phi_sig = m_phi_sig_temp.min()
max_phi_sig.name = 'max_phi_sig'
mean_phi_sig.name = 'mean_phi_sig'
min_phi_sig.name = 'min_phi_sig'
max_prior_sig = m_prior_sig_temp.max()
mean_prior_sig = m_prior_sig_temp.mean()
min_prior_sig = m_prior_sig_temp.min()
max_prior_sig.name = 'max_prior_sig'
mean_prior_sig.name = 'mean_prior_sig'
min_prior_sig.name = 'min_prior_sig'
model.inputs = [x]
model.params = params
model.nodes = nodes
model.set_updates(shared_updates)
optimizer = Adam(
lr=lr
)
# Compiled function evaluating all monitoring channels from m_x.
monitor_fn = theano.function(inputs=[m_x],
outputs=[m_nll_upper_bound, m_recon_term, m_kl_term,
max_phi_sig, mean_phi_sig, min_phi_sig,
max_prior_sig, mean_prior_sig, min_prior_sig,
max_theta_sig, mean_theta_sig, min_theta_sig,
max_x, mean_x, min_x,
max_theta_mu, mean_theta_mu, min_theta_mu],
on_unused_input='ignore')
# Extensions: NaN-checking gradient clipping, epoch budget, monitoring on
# fixed train/valid slices, pickling with forced saves, early stopping on
# channel_name, and weight normalization.
extension = [
GradientClipping(batch_size=batch_size, check_nan=1),
EpochCount(epoch),
Monitoring(freq=monitoring_freq,
monitor_fn=monitor_fn,
ddout=[m_nll_upper_bound, m_recon_term, m_kl_term,
max_phi_sig, mean_phi_sig, min_phi_sig,
max_prior_sig, mean_prior_sig, min_prior_sig,
max_theta_sig, mean_theta_sig, min_theta_sig,
max_x, mean_x, min_x,
max_theta_mu, mean_theta_mu, min_theta_mu],
data=[Iterator(train_data, m_batch_size, start=0, end=112640),
Iterator(valid_data, m_batch_size, start=2040064, end=2152704)]),
Picklize(freq=monitoring_freq, force_save_freq=force_saving_freq, path=save_path),
EarlyStopping(freq=monitoring_freq, force_save_freq=force_saving_freq, path=save_path, channel=channel_name),
WeightNorm()
]
# NOTE(review): cost is nll_upper_bound (no m_ prefix) — presumably the
# training-graph bound built in the omitted portion, distinct from the
# m_-prefixed monitoring graph above; verify against the full source.
mainloop = Training(
name=pkl_name,
data=Iterator(train_data, batch_size, start=0, end=2040064),
model=model,
optimizer=optimizer,
cost=nll_upper_bound,
outputs=[nll_upper_bound],
extension=extension
)
mainloop.run()
示例5: main
#.........这里部分代码省略.........
# Example 5: same GMM-output VRNN setup as Example 4 (separate m_-prefixed
# monitoring graph + compiled monitor function) but feeding training data
# through KIter; ends in Training.run().
# NOTE(review): fragment of a larger main() — m_theta_sig_temp,
# m_phi_sig_temp, m_prior_sig_temp, m_nll_upper_bound, m_recon_term,
# m_kl_term, max_x/mean_x/min_x, max_theta_mu/mean_theta_mu/min_theta_mu,
# m_x, x, params, model, nodes, shared_updates, batch_size, m_batch_size,
# lr, epoch, monitoring_freq, force_saving_freq, save_path, channel_name,
# train_data, valid_data, pkl_name and the training-graph nll_upper_bound
# are defined in omitted code.
# Named scalar statistics over the monitoring graph.
max_theta_sig = m_theta_sig_temp.max()
mean_theta_sig = m_theta_sig_temp.mean()
min_theta_sig = m_theta_sig_temp.min()
max_theta_sig.name = "max_theta_sig"
mean_theta_sig.name = "mean_theta_sig"
min_theta_sig.name = "min_theta_sig"
max_phi_sig = m_phi_sig_temp.max()
mean_phi_sig = m_phi_sig_temp.mean()
min_phi_sig = m_phi_sig_temp.min()
max_phi_sig.name = "max_phi_sig"
mean_phi_sig.name = "mean_phi_sig"
min_phi_sig.name = "min_phi_sig"
max_prior_sig = m_prior_sig_temp.max()
mean_prior_sig = m_prior_sig_temp.mean()
min_prior_sig = m_prior_sig_temp.min()
max_prior_sig.name = "max_prior_sig"
mean_prior_sig.name = "mean_prior_sig"
min_prior_sig.name = "min_prior_sig"
model.inputs = [x]
model.params = params
model.nodes = nodes
model.set_updates(shared_updates)
optimizer = Adam(lr=lr)
# Compiled function evaluating all monitoring channels from m_x.
monitor_fn = theano.function(
inputs=[m_x],
outputs=[
m_nll_upper_bound,
m_recon_term,
m_kl_term,
max_phi_sig,
mean_phi_sig,
min_phi_sig,
max_prior_sig,
mean_prior_sig,
min_prior_sig,
max_theta_sig,
mean_theta_sig,
min_theta_sig,
max_x,
mean_x,
min_x,
max_theta_mu,
mean_theta_mu,
min_theta_mu,
],
on_unused_input="ignore",
)
# Extensions: NaN-checking gradient clipping, epoch budget, monitoring on
# fixed train/valid slices, pickling with forced saves, early stopping on
# channel_name, and weight normalization.
extension = [
GradientClipping(batch_size=batch_size, check_nan=1),
EpochCount(epoch),
Monitoring(
freq=monitoring_freq,
monitor_fn=monitor_fn,
ddout=[
m_nll_upper_bound,
m_recon_term,
m_kl_term,
max_phi_sig,
mean_phi_sig,
min_phi_sig,
max_prior_sig,
mean_prior_sig,
min_prior_sig,
max_theta_sig,
mean_theta_sig,
min_theta_sig,
max_x,
mean_x,
min_x,
max_theta_mu,
mean_theta_mu,
min_theta_mu,
],
data=[
Iterator(train_data, m_batch_size, start=0, end=112640),
Iterator(valid_data, m_batch_size, start=2040064, end=2152704),
],
),
Picklize(freq=monitoring_freq, force_save_freq=force_saving_freq, path=save_path),
EarlyStopping(freq=monitoring_freq, force_save_freq=force_saving_freq, path=save_path, channel=channel_name),
WeightNorm(),
]
# NOTE(review): training data comes from KIter here (Iterator elsewhere),
# and cost is the un-prefixed nll_upper_bound — both presumably defined in
# the omitted portion; verify against the full source.
mainloop = Training(
name=pkl_name,
data=KIter(train_data, batch_size, start=0, end=2040064),
model=model,
optimizer=optimizer,
cost=nll_upper_bound,
outputs=[nll_upper_bound],
extension=extension,
)
mainloop.run()