

Python LSTM.get_init_state Method Code Examples

This article collects typical usage examples of the Python method cle.cle.layers.recurrent.LSTM.get_init_state. If you are wondering what LSTM.get_init_state does, how to call it, or what real-world usage looks like, the curated code examples below should help. You can also browse further usage examples of cle.cle.layers.recurrent.LSTM, the class this method belongs to.


Fifteen code examples of LSTM.get_init_state are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
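Before the excerpts, here is a condensed sketch of the pattern they all share: get_init_state(batch_size) returns the step-0 recurrent state (hidden and cell states concatenated, which is why the persisted buffers below have width rnn_dim*2), and that state is fed to theano.scan via outputs_info, optionally switched against a persisted previous state so it is only reset every reset_freq steps. This is a minimal sketch, not one of the listed examples: it assumes an already-constructed LSTM node rnn, its initialized parameter dict params, a symbolic input x of shape (time, batch, features), and batch_size / reset_freq / step_count / rnn_tm1 defined as in the full scripts excerpted below.

# Minimal sketch of the get_init_state pattern (assumes rnn, params, x,
# batch_size, reset_freq, step_count and rnn_tm1 are set up as in the excerpts).
import theano
import theano.tensor as T

# Step-0 state returned by the layer; h and c are concatenated, width rnn_dim*2.
s_0 = rnn.get_init_state(batch_size)

# Stateful variant used in several examples: start from a fresh initial state
# every reset_freq steps, otherwise reuse the persisted previous state.
s_0 = T.switch(T.eq(T.mod(step_count, reset_freq), 0),
               rnn.get_init_state(batch_size), rnn_tm1)

def inner_fn(x_t, s_tm1):
    # One recurrent step; cle layers take nested lists of [inputs], [states].
    return rnn.fprop([[x_t], [s_tm1]], params)

(s_temp, updates) = theano.scan(fn=inner_fn,
                                sequences=[x],
                                outputs_info=[s_0])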

Example 1: main

# Required module import: from cle.cle.layers.recurrent import LSTM [as alias]
# Or: from cle.cle.layers.recurrent.LSTM import get_init_state [as alias]

#......... some code omitted here .........
                                    unit='softplus',
                                    cons=1e-4,
                                    init_W=init_W,
                                    init_b=init_b_sig)

    coeff = FullyConnectedLayer(name='coeff',
                                parent=['theta_4'],
                                parent_dim=[p_x_dim],
                                nout=k,
                                unit='softmax',
                                init_W=init_W,
                                init_b=init_b)

    nodes = [rnn,
             x_1, x_2, x_3, x_4,
             z_1, z_2, z_3, z_4,
             phi_1, phi_2, phi_3, phi_4, phi_mu, phi_sig,
             prior_1, prior_2, prior_3, prior_4, prior_mu, prior_sig,
             theta_1, theta_2, theta_3, theta_4, theta_mu, theta_sig, coeff]

    params = OrderedDict()
    for node in nodes:
        if node.initialize() is not None:
            params.update(node.initialize())
    params = init_tparams(params)

    step_count = sharedX(0, name='step_count')
    last_rnn = np.zeros((batch_size, rnn_dim*2), dtype=theano.config.floatX)
    rnn_tm1 = sharedX(last_rnn, name='rnn_tm1')
    shared_updates = OrderedDict()
    shared_updates[step_count] = step_count + 1

    s_0 = T.switch(T.eq(T.mod(step_count, reset_freq), 0),
                   rnn.get_init_state(batch_size), rnn_tm1)

    x_shape = x.shape
    x_in = x.reshape((x_shape[0]*x_shape[1], -1))
    x_1_in = x_1.fprop([x_in], params)
    x_2_in = x_2.fprop([x_1_in], params)
    x_3_in = x_3.fprop([x_2_in], params)
    x_4_in = x_4.fprop([x_3_in], params)
    x_4_in = x_4_in.reshape((x_shape[0], x_shape[1], -1))


    def inner_fn(x_t, s_tm1):

        phi_1_t = phi_1.fprop([x_t, s_tm1], params)
        phi_2_t = phi_2.fprop([phi_1_t], params)
        phi_3_t = phi_3.fprop([phi_2_t], params)
        phi_4_t = phi_4.fprop([phi_3_t], params)
        phi_mu_t = phi_mu.fprop([phi_4_t], params)
        phi_sig_t = phi_sig.fprop([phi_4_t], params)

        prior_1_t = prior_1.fprop([s_tm1], params)
        prior_2_t = prior_2.fprop([prior_1_t], params)
        prior_3_t = prior_3.fprop([prior_2_t], params)
        prior_4_t = prior_4.fprop([prior_3_t], params)
        prior_mu_t = prior_mu.fprop([prior_4_t], params)
        prior_sig_t = prior_sig.fprop([prior_4_t], params)

        z_t = Gaussian_sample(phi_mu_t, phi_sig_t)

        z_1_t = z_1.fprop([z_t], params)
        z_2_t = z_2.fprop([z_1_t], params)
        z_3_t = z_3.fprop([z_2_t], params)
        z_4_t = z_4.fprop([z_3_t], params)
Author: kastnerkyle, Project: nips2015_vrnn, Lines of code: 70, Source file: vrnn_gmm.py

Example 2: flatten

# Required module import: from cle.cle.layers.recurrent import LSTM [as alias]
# Or: from cle.cle.layers.recurrent.LSTM import get_init_state [as alias]
         theta_1, theta_2, theta_3, theta_4, theta_mu, theta_sig]

for node in nodes:
    node.initialize()

params = flatten([node.get_params().values() for node in nodes])

step_count = sharedX(0, name='step_count')
last_main_lstm = np.zeros((batch_size, main_lstm_dim*2), dtype=theano.config.floatX)
main_lstm_tm1 = sharedX(last_main_lstm, name='main_lstm_tm1')
update_list = [step_count, main_lstm_tm1]

step_count = T.switch(T.le(step_count, reset_freq), step_count + 1, 0)
s_0 = T.switch(T.or_(T.cast(T.eq(step_count, 0), 'int32'),
                     T.cast(T.eq(T.sum(main_lstm_tm1), 0.), 'int32')),
               main_lstm.get_init_state(batch_size), main_lstm_tm1)

x_shape = x.shape
x_in = x.reshape((x_shape[0]*x_shape[1], -1))
x_1_in = x_1.fprop([x_in])
x_2_in = x_2.fprop([x_1_in])
x_3_in = x_3.fprop([x_2_in])
x_4_in = x_4.fprop([x_3_in])
x_4_in = x_4_in.reshape((x_shape[0], x_shape[1], -1))


def inner_fn(x_t, s_tm1):

    phi_1_t = phi_1.fprop([x_t, s_tm1])
    phi_2_t = phi_2.fprop([phi_1_t])
    phi_3_t = phi_3.fprop([phi_2_t])
Author: anirudh9119, Project: SpeechSyn, Lines of code: 33, Source file: m2.py

Example 3: flatten

# Required module import: from cle.cle.layers.recurrent import LSTM [as alias]
# Or: from cle.cle.layers.recurrent.LSTM import get_init_state [as alias]
for node in nodes:
    node.initialize()

params = flatten([node.get_params().values() for node in nodes])

step_count = sharedX(0, name='step_count')
last_encoder = np.zeros((batch_size, encoder_dim*2), dtype=theano.config.floatX)
last_decoder = np.zeros((batch_size, decoder_dim*2), dtype=theano.config.floatX)
encoder_tm1 = sharedX(last_encoder, name='encoder_tm1')
decoder_tm1 = sharedX(last_decoder, name='decoder_tm1')
update_list = [step_count, encoder_tm1, decoder_tm1]

step_count = T.switch(T.le(step_count, reset_freq), step_count + 1, 0)
enc_0 = T.switch(T.or_(T.cast(T.eq(step_count, 0), 'int32'),
                       T.cast(T.eq(T.sum(encoder_tm1), 0.), 'int32')),
                 encoder.get_init_state(batch_size), encoder_tm1)
dec_0 = T.switch(T.or_(T.cast(T.eq(step_count, 0), 'int32'),
                       T.cast(T.eq(T.sum(decoder_tm1), 0.), 'int32')),
                 decoder.get_init_state(batch_size), decoder_tm1)

x_shape = x.shape
x_in = x.reshape((x_shape[0]*x_shape[1], -1))


def inner_fn(x_t, x_tm1, enc_tm1, dec_tm1):

    enc_t = encoder.fprop([[x_t], [enc_tm1]])

    phi_mu_t = phi_mu.fprop([enc_t])
    phi_sig_t = phi_sig.fprop([enc_t])
Author: anirudh9119, Project: SpeechSyn, Lines of code: 32, Source file: storn0_orig.py

Example 4: main

# Required module import: from cle.cle.layers.recurrent import LSTM [as alias]
# Or: from cle.cle.layers.recurrent.LSTM import get_init_state [as alias]

#......... some code omitted here .........
    corr = FullyConnectedLayer(
        name="corr", parent=["theta_1"], parent_dim=[p_x_dim], nout=1, unit="tanh", init_W=init_W, init_b=init_b
    )

    binary = FullyConnectedLayer(
        name="binary", parent=["theta_1"], parent_dim=[p_x_dim], nout=1, unit="sigmoid", init_W=init_W, init_b=init_b
    )

    nodes = [
        rnn,
        x_1,
        z_1,
        phi_1,
        phi_mu,
        phi_sig,
        prior_1,
        prior_mu,
        prior_sig,
        theta_1,
        theta_mu,
        theta_sig,
        corr,
        binary,
    ]

    params = OrderedDict()

    for node in nodes:
        if node.initialize() is not None:
            params.update(node.initialize())

    params = init_tparams(params)

    s_0 = rnn.get_init_state(batch_size)

    x_1_temp = x_1.fprop([x], params)

    def inner_fn(x_t, s_tm1):

        phi_1_t = phi_1.fprop([x_t, s_tm1], params)
        phi_mu_t = phi_mu.fprop([phi_1_t], params)
        phi_sig_t = phi_sig.fprop([phi_1_t], params)

        prior_1_t = prior_1.fprop([s_tm1], params)
        prior_mu_t = prior_mu.fprop([prior_1_t], params)
        prior_sig_t = prior_sig.fprop([prior_1_t], params)

        z_t = Gaussian_sample(phi_mu_t, phi_sig_t)
        z_1_t = z_1.fprop([z_t], params)

        s_t = rnn.fprop([[x_t, z_1_t], [s_tm1]], params)

        return s_t, phi_mu_t, phi_sig_t, prior_mu_t, prior_sig_t, z_1_t

    ((s_temp, phi_mu_temp, phi_sig_temp, prior_mu_temp, prior_sig_temp, z_1_temp), updates) = theano.scan(
        fn=inner_fn, sequences=[x_1_temp], outputs_info=[s_0, None, None, None, None, None]
    )

    for k, v in updates.iteritems():
        k.default_update = v

    s_temp = concatenate([s_0[None, :, :], s_temp[:-1]], axis=0)
    theta_1_temp = theta_1.fprop([z_1_temp, s_temp], params)
    theta_mu_temp = theta_mu.fprop([theta_1_temp], params)
    theta_sig_temp = theta_sig.fprop([theta_1_temp], params)
    corr_temp = corr.fprop([theta_1_temp], params)
Author: vseledkin, Project: nips2015_vrnn, Lines of code: 70, Source file: vrnn_gauss.py

Example 5: BiGMM

# Required module import: from cle.cle.layers.recurrent import LSTM [as alias]
# Or: from cle.cle.layers.recurrent.LSTM import get_init_state [as alias]
    theta_sig_t = theta_sig.fprop([theta_4_t])
    coeff_t = coeff.fprop([theta_4_t])

    x_1_t = x_1.fprop([x_t])
    x_2_t = x_2.fprop([x_1_t])
    x_3_t = x_3.fprop([x_2_t])
    x_4_t = x_4.fprop([x_3_t])
 
    s_t = main_lstm.fprop([[x_4_t], [s_tm1]])

    return s_t, theta_mu_t, theta_sig_t, coeff_t

((s_t, theta_mu_t, theta_sig_t, coeff_t), updates) =\
    theano.scan(fn=inner_fn,
                sequences=[x],
                outputs_info=[main_lstm.get_init_state(batch_size),
                              None, None, None])

for k, v in updates.iteritems():
    k.default_update = v

reshaped_x = x.reshape((x.shape[0]*x.shape[1], -1))
reshaped_theta_mu = theta_mu_t.reshape((theta_mu_t.shape[0]*theta_mu_t.shape[1], -1))
reshaped_theta_sig = theta_sig_t.reshape((theta_sig_t.shape[0]*theta_sig_t.shape[1], -1))
reshaped_coeff = coeff_t.reshape((coeff_t.shape[0]*coeff_t.shape[1], -1))

recon = BiGMM(reshaped_x, reshaped_theta_mu, reshaped_theta_sig, reshaped_coeff)
recon = recon.reshape((theta_mu_t.shape[0], theta_mu_t.shape[1]))
recon = recon * mask
recon_term = recon.sum()
recon_term.name = 'nll'
Author: anirudh9119, Project: SpeechSyn, Lines of code: 33, Source file: m0.py

Example 6: flatten

# Required module import: from cle.cle.layers.recurrent import LSTM [as alias]
# Or: from cle.cle.layers.recurrent.LSTM import get_init_state [as alias]
params = flatten([node.get_params().values() for node in nodes])

step_count = sharedX(0, name='step_count')
last_lstm_1 = np.zeros((batch_size, lstm_1_dim*2), dtype=theano.config.floatX)
last_lstm_2 = np.zeros((batch_size, lstm_2_dim*2), dtype=theano.config.floatX)
last_lstm_3 = np.zeros((batch_size, lstm_3_dim*2), dtype=theano.config.floatX)
lstm_1_tm1 = sharedX(last_lstm_1, name='lstm_1_tm1')
lstm_2_tm1 = sharedX(last_lstm_2, name='lstm_2_tm1')
lstm_3_tm1 = sharedX(last_lstm_3, name='lstm_3_tm1')
update_list = [step_count, lstm_1_tm1, lstm_2_tm1, lstm_3_tm1]

step_count = T.switch(T.le(step_count, reset_freq), step_count + 1, 0)
s_1_0 = T.switch(T.or_(T.cast(T.eq(step_count, 0), 'int32'),
                       T.cast(T.eq(T.sum(lstm_1_tm1), 0.), 'int32')),
                 lstm_1.get_init_state(batch_size), lstm_1_tm1)
s_2_0 = T.switch(T.or_(T.cast(T.eq(step_count, 0), 'int32'),
                       T.cast(T.eq(T.sum(lstm_2_tm1), 0.), 'int32')),
                 lstm_2.get_init_state(batch_size), lstm_2_tm1)
s_3_0 = T.switch(T.or_(T.cast(T.eq(step_count, 0), 'int32'),
                       T.cast(T.eq(T.sum(lstm_3_tm1), 0.), 'int32')),
                 lstm_3.get_init_state(batch_size), lstm_3_tm1)

x_shape = x.shape
x_in = x.reshape((x_shape[0]*x_shape[1], -1))
x_1_in = x_1.fprop([x_in])
x_2_in = x_2.fprop([x_1_in])
x_3_in = x_3.fprop([x_2_in])
x_4_in = x_4.fprop([x_3_in])
x_4_in = x_4_in.reshape((x_shape[0], x_shape[1], -1))
Author: anirudh9119, Project: SpeechSyn, Lines of code: 31, Source file: deep_m2.py

Example 7: flatten

# Required module import: from cle.cle.layers.recurrent import LSTM [as alias]
# Or: from cle.cle.layers.recurrent.LSTM import get_init_state [as alias]
params = flatten([node.get_params().values() for node in nodes])

step_count = sharedX(0, name='step_count')
last_lstm_1 = np.zeros((batch_size, lstm_1_dim*2), dtype=theano.config.floatX)
last_lstm_2 = np.zeros((batch_size, lstm_2_dim*2), dtype=theano.config.floatX)
last_lstm_3 = np.zeros((batch_size, lstm_3_dim*2), dtype=theano.config.floatX)
lstm_1_tm1 = sharedX(last_lstm_1, name='lstm_1_tm1')
lstm_2_tm1 = sharedX(last_lstm_2, name='lstm_2_tm1')
lstm_3_tm1 = sharedX(last_lstm_3, name='lstm_3_tm1')
update_list = [step_count, lstm_1_tm1, lstm_2_tm1, lstm_3_tm1]

step_count = T.switch(T.le(step_count, reset_freq), step_count + 1, 0)
s_1_0 = T.switch(T.or_(T.cast(T.eq(step_count, 0), 'int32'),
                       T.cast(T.eq(T.sum(lstm_1_tm1), 0.), 'int32')),
                 lstm_1.get_init_state(batch_size), lstm_1_tm1)
s_2_0 = T.switch(T.or_(T.cast(T.eq(step_count, 0), 'int32'),
                       T.cast(T.eq(T.sum(lstm_2_tm1), 0.), 'int32')),
                 lstm_2.get_init_state(batch_size), lstm_2_tm1)
s_3_0 = T.switch(T.or_(T.cast(T.eq(step_count, 0), 'int32'),
                       T.cast(T.eq(T.sum(lstm_3_tm1), 0.), 'int32')),
                 lstm_3.get_init_state(batch_size), lstm_3_tm1)

x_shape = x.shape
x_in = x.reshape((x_shape[0]*x_shape[1], -1))


def inner_fn(x_t, s_1_tm1, s_2_tm1, s_3_tm1):

    theta_1_t = theta_1.fprop([s_3_tm1])
    theta_2_t = theta_2.fprop([theta_1_t])
Author: anirudh9119, Project: SpeechSyn, Lines of code: 32, Source file: lstm_grbm.py

Example 8: main

# Required module import: from cle.cle.layers.recurrent import LSTM [as alias]
# Or: from cle.cle.layers.recurrent.LSTM import get_init_state [as alias]

#......... some code omitted here .........
                                    parent_dim=[s2x_dim],
                                    nout=target_dim,
                                    unit='softplus',
                                    cons=1e-4,
                                    init_W=init_W,
                                    init_b=init_b_sig)

    corr = FullyConnectedLayer(name='corr',
                                parent=['theta_1'],
                                parent_dim=[s2x_dim],
                                nout=1,
                                unit='tanh',
                                init_W=init_W,
                                init_b=init_b)

    binary = FullyConnectedLayer(name='binary',
                                parent=['theta_1'],
                                parent_dim=[s2x_dim],
                                nout=1,
                                unit='sigmoid',
                                init_W=init_W,
                                init_b=init_b)

    nodes = [rnn, x_1, theta_1, theta_mu, theta_sig, corr, binary]

    params = OrderedDict()

    for node in nodes:
        if node.initialize() is not None:
            params.update(node.initialize())

    params = init_tparams(params)

    s_0 = rnn.get_init_state(batch_size)

    x_1_temp = x_1.fprop([x], params)


    def inner_fn(x_t, s_tm1):

        s_t = rnn.fprop([[x_t], [s_tm1]], params)

        return s_t

    ((s_temp), updates) = theano.scan(fn=inner_fn,
                                      sequences=[x_1_temp],
                                      outputs_info=[s_0])

    for k, v in updates.iteritems():
        k.default_update = v

    s_temp = concatenate([s_0[None, :, :], s_temp[:-1]], axis=0)
    theta_1_temp = theta_1.fprop([s_temp], params)
    theta_mu_temp = theta_mu.fprop([theta_1_temp], params)
    theta_sig_temp = theta_sig.fprop([theta_1_temp], params)
    corr_temp = corr.fprop([theta_1_temp], params)
    binary_temp = binary.fprop([theta_1_temp], params)

    x_shape = x.shape
    x_in = x.reshape((x_shape[0]*x_shape[1], -1))
    theta_mu_in = theta_mu_temp.reshape((x_shape[0]*x_shape[1], -1))
    theta_sig_in = theta_sig_temp.reshape((x_shape[0]*x_shape[1], -1))
    corr_in = corr_temp.reshape((x_shape[0]*x_shape[1], -1))
    binary_in = binary_temp.reshape((x_shape[0]*x_shape[1], -1))

    recon = BiGauss(x_in, theta_mu_in, theta_sig_in, corr_in, binary_in)
Author: xzhang311, Project: nips2015_vrnn, Lines of code: 70, Source file: rnn_gauss.py

Example 9: flatten

# Required module import: from cle.cle.layers.recurrent import LSTM [as alias]
# Or: from cle.cle.layers.recurrent.LSTM import get_init_state [as alias]
         theta_1, theta_2, theta_3, theta_4, theta_mu, theta_sig]

for node in nodes:
    node.initialize()

params = flatten([node.get_params().values() for node in nodes])

step_count = sharedX(0, name='step_count')
last_main_lstm = np.zeros((batch_size, main_lstm_dim*2), dtype=theano.config.floatX)
main_lstm_tm1 = sharedX(last_main_lstm, name='main_lstm_tm1')
update_list = [step_count, main_lstm_tm1]

step_count = T.switch(T.le(step_count, reset_freq), step_count + 1, 0)
s_0 = T.switch(T.or_(T.cast(T.eq(step_count, 0), 'int32'),
                     T.cast(T.eq(T.sum(main_lstm_tm1), 0.), 'int32')),
               main_lstm.get_init_state(batch_size), main_lstm_tm1)

x_shape = x.shape
x_in = x.reshape((x_shape[0]*x_shape[1], -1))
x_1_in = x_1.fprop([x_in])
x_2_in = x_2.fprop([x_1_in])
x_3_in = x_3.fprop([x_2_in])
x_4_in = x_4.fprop([x_3_in])
x_4_in = x_4_in.reshape((x_shape[0], x_shape[1], -1))


def inner_fn(x_t, s_tm1):

    s_t = main_lstm.fprop([[x_t], [s_tm1]])

    return s_t
Author: anirudh9119, Project: SpeechSyn, Lines of code: 33, Source file: m0.py

Example 10: Gaussian

# Required module import: from cle.cle.layers.recurrent import LSTM [as alias]
# Or: from cle.cle.layers.recurrent.LSTM import get_init_state [as alias]
        w_2 = Gaussian(z_2_is, prior_mu_2_t, prior_sig_2_t) -\
              Gaussian(z_2_is, phi_mu_2_t, phi_sig_2_t)
        marginal_ll.append(GMM(x_t, theta_mu_t, theta_sig_t, coeff_t) + w_1 + w_2)
    marginal_ll = T.concatenate(marginal_ll, axis=0).mean()

    return s_1_t, s_2_t, kl_1_t, kl_2_t, phi_sig_1_t, phi_sig_2_t,\
           prior_sig_1_t, prior_sig_2_t, theta_mu_t, theta_sig_t, coeff_t,\
           marginal_ll

((s_1_t, s_2_t, kl_1_t, kl_2_t, phi_sig_1_t, phi_sig_2_t,
  prior_sig_1_t, prior_sig_2_t, theta_mu_t, theta_sig_t, coeff_t,
  marginal_ll),
 updates) =\
    theano.scan(fn=inner_fn,
                sequences=[x],
                outputs_info=[coder_1.get_init_state(),
                              coder_2.get_init_state(),
                              None, None, None, None, None,
                              None, None, None, None, None])
for k, v in updates.iteritems():
    k.default_update = v

reshaped_x = x.reshape((x.shape[0]*x.shape[1], -1))
reshaped_theta_mu = theta_mu_t.reshape((theta_mu_t.shape[0]*theta_mu_t.shape[1], -1))
reshaped_theta_sig = theta_sig_t.reshape((theta_sig_t.shape[0]*theta_sig_t.shape[1], -1))
reshaped_coeff = coeff_t.reshape((coeff_t.shape[0]*coeff_t.shape[1], -1))
reshaped_mask = mask.flatten()
kl_1_term = kl_1_t.reshape((kl_1_t.shape[0]*kl_1_t.shape[1], -1))
kl_2_term = kl_2_t.reshape((kl_2_t.shape[0]*kl_2_t.shape[1], -1))
recon_term = GMM(reshaped_x, reshaped_theta_mu, reshaped_theta_sig, reshaped_coeff)
# Apply mask
Author: anirudh9119, Project: SpeechSyn, Lines of code: 33, Source file: stacked_rnnvae.py

Example 11: Gaussian

# Required module import: from cle.cle.layers.recurrent import LSTM [as alias]
# Or: from cle.cle.layers.recurrent.LSTM import get_init_state [as alias]
        theta_sig_t = theta_sig.fprop([theta_emb_t])
        coeff_t = coeff.fprop([theta_emb_t])
        w = Gaussian(z_is, prior_mu_t, prior_sig_t) -\
            Gaussian(z_is, phi_mu_t, phi_sig_t)
        marginal_ll.append(GMM(x_t, theta_mu_t, theta_sig_t, coeff_t) + w)
    marginal_ll = T.concatenate(marginal_ll, axis=0).mean()

    return enc_t, dec_t, pec_t, kl_t, theta_mu_t, coeff_t, marginal_ll

prior_sig_t = prior_sig.fprop()
phi_sig_t = phi_sig.fprop()
theta_sig_t = theta_sig.fprop()
((enc_t, dec_t, pec_t, kl_t, theta_mu_t, coeff_t, marginal_ll), updates) =\
    theano.scan(fn=inner_fn,
                sequences=[x, x2],
                outputs_info=[encoder.get_init_state(),
                              decoder.get_init_state(),
                              pecoder.get_init_state(),
                              None, None, None, None],
                non_sequences=[phi_sig_t, prior_sig_t, theta_sig_t])
for k, v in updates.iteritems():
    k.default_update = v

reshaped_x = x.reshape((x.shape[0]*x.shape[1], -1))
reshaped_theta_mu = theta_mu_t.reshape((theta_mu_t.shape[0]*theta_mu_t.shape[1], -1))
reshaped_coeff = coeff_t.reshape((coeff_t.shape[0]*coeff_t.shape[1], -1))
reshaped_mask = mask.flatten()

kl_term = kl_t.reshape((kl_t.shape[0]*kl_t.shape[1], -1))
recon_term = GMM(reshaped_x, reshaped_theta_mu, theta_sig_t, reshaped_coeff)
# Apply mask
Author: anirudh9119, Project: SpeechSyn, Lines of code: 33, Source file: rnnvae_free_theta_sig_decouple.py

Example 12: OrderedDict

# Required module import: from cle.cle.layers.recurrent import LSTM [as alias]
# Or: from cle.cle.layers.recurrent.LSTM import get_init_state [as alias]
         theta_1, theta_2, theta_3, theta_4, theta_mu, theta_sig, coeff]

params = OrderedDict()
for node in nodes:
    if node.initialize() is not None:
        params.update(node.initialize())
params = init_tparams(params)

step_count = sharedX(0, name='step_count')
last_rnn = np.zeros((batch_size, rnn_dim*2), dtype=theano.config.floatX)
rnn_tm1 = sharedX(last_rnn, name='rnn_tm1')
shared_updates = OrderedDict()
shared_updates[step_count] = step_count + 1

s_0 = T.switch(T.eq(T.mod(step_count, reset_freq), 0),
               rnn.get_init_state(batch_size), rnn_tm1)

x_1_temp = x_1.fprop([x], params)
x_2_temp = x_2.fprop([x_1_temp], params)
x_3_temp = x_3.fprop([x_2_temp], params)
x_4_temp = x_4.fprop([x_3_temp], params)


def inner_fn(x_t, s_tm1):

    s_t = rnn.fprop([[x_t], [s_tm1]], params)

    return s_t

(s_temp, updates) = theano.scan(fn=inner_fn,
                                sequences=[x_4_temp],
Author: LEONOB2014, Project: nips2015_vrnn, Lines of code: 33, Source file: rnn_gmm.py

Example 13: OrderedDict

# Required module import: from cle.cle.layers.recurrent import LSTM [as alias]
# Or: from cle.cle.layers.recurrent.LSTM import get_init_state [as alias]
                             nout=nlabel,
                             unit='sigmoid',
                             init_W=init_W,
                             init_b=init_b)

nodes = [h1, h2, h3, output]

params = OrderedDict()

for node in nodes:
    if node.initialize() is not None:
        params.update(node.initialize())

params = init_tparams(params)

s1_0 = h1.get_init_state(batch_size)
s2_0 = h2.get_init_state(batch_size)
s3_0 = h3.get_init_state(batch_size)


def inner_fn(x_t, s1_tm1, s2_tm1, s3_tm1):

    h1_t = h1.fprop([[x_t], [s1_tm1]], params)
    h2_t = h2.fprop([[h1_t], [s2_tm1]], params)
    h3_t = h3.fprop([[h2_t], [s3_tm1]], params)
    output_t = output.fprop([h1_t, h2_t, h3_t], params)

    return h1_t, h2_t, h3_t, output_t

((h1_temp, h2_temp, h3_temp, y_hat_temp), updates) =\
    theano.scan(fn=inner_fn,
Author: BigeyeDestroyer, Project: cle, Lines of code: 33, Source file: music.py

Example 14: NllBin

# Required module import: from cle.cle.layers.recurrent import LSTM [as alias]
# Or: from cle.cle.layers.recurrent.LSTM import get_init_state [as alias]
    prior_out = prior.fprop([phi_mu_out, phi_sig_out])
    kl_out = kl.fprop([phi_mu_out, phi_sig_out])

    dec_out = dec.fprop([[prior_out], [dec_tm1]])

    w_out = w.fprop([dec_out])
    write_param_out = write_param.fprop([dec_out])
    write_out = write.fprop([w_out, write_param_out])

    canvas_out = canvas.fprop([[write_out], [canvas_tm1]])

    return enc_out, dec_out, canvas_out, kl_out

((enc_out, dec_out, canvas_out, kl_out), updates) =\
    theano.scan(fn=inner_fn,
                outputs_info=[enc.get_init_state(),
                              dec.get_init_state(),
                              canvas.get_init_state(),
                              None],
                non_sequences=[x],
                n_steps=n_steps)
for k, v in updates.iteritems():
    k.default_update = v

recon_term = NllBin(x, T.nnet.sigmoid(canvas_out[-1])).mean()
kl_term = kl_out.sum(axis=0).mean()
cost = recon_term + kl_term
cost.name = 'cost'
recon_term.name = 'recon_term'
kl_term.name = 'kl_term'
recon_err = ((x - T.nnet.sigmoid(canvas_out[-1]))**2).mean() / x.std()
Author: anirudh9119, Project: cle, Lines of code: 33, Source file: draw.py

Example 15: GMM

# Required module import: from cle.cle.layers.recurrent import LSTM [as alias]
# Or: from cle.cle.layers.recurrent.LSTM import get_init_state [as alias]
    z_t = prior.fprop([phi_mu_t, phi_sig_t])
    kl_t = kl.fprop([phi_mu_t, phi_sig_t, prior_mu_t, prior_sig_t])

    theta_mu_t = theta_mu.fprop([s_tm1])
    theta_sig_t = theta_sig.fprop([s_tm1])
    coeff_t = coeff.fprop([s_tm1])

    s_t = coder.fprop([[x_t, z_t], [s_tm1]])

    return s_t, kl_t, prior_sig_t, phi_sig_t, theta_mu_t, theta_sig_t, coeff_t

((s_t, kl_t, prior_sig_t, phi_sig_t, theta_mu_t, theta_sig_t, coeff_t),
 updates) =\
    theano.scan(fn=inner_fn,
                sequences=[x],
                outputs_info=[coder.get_init_state(),
                              None, None, None, None, None, None])

for k, v in updates.iteritems():
    k.default_update = v

reshaped_x = x.reshape((x.shape[0]*x.shape[1], -1))
reshaped_theta_mu = theta_mu_t.reshape((theta_mu_t.shape[0]*theta_mu_t.shape[1], -1))
reshaped_theta_sig = theta_sig_t.reshape((theta_sig_t.shape[0]*theta_sig_t.shape[1], -1))
reshaped_coeff = coeff_t.reshape((coeff_t.shape[0]*coeff_t.shape[1], -1))
reshaped_mask = mask.flatten()

kl_term = kl_t.reshape((kl_t.shape[0]*kl_t.shape[1], -1))
recon_term = GMM(reshaped_x, reshaped_theta_mu, reshaped_theta_sig, reshaped_coeff)
# Apply mask
kl_term = kl_term[reshaped_mask.nonzero()].mean()
Author: anirudh9119, Project: SpeechSyn, Lines of code: 33, Source file: condsig.py


Note: The cle.cle.layers.recurrent.LSTM.get_init_state examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. For redistribution and use, please refer to the license of the corresponding project. Do not reproduce without permission.