

Python FullyConnectedLayer.fprop Method Code Examples

This article collects typical usage examples of the Python method cle.cle.layers.feedforward.FullyConnectedLayer.fprop. If you are wondering what FullyConnectedLayer.fprop does or how to call it, the curated examples below should help. You can also browse further usage examples of the containing class, cle.cle.layers.feedforward.FullyConnectedLayer.


Fifteen code examples of the FullyConnectedLayer.fprop method are shown below, sorted by popularity by default. You can upvote the examples you find useful; your votes help the site recommend better Python code examples.
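
Before diving into the excerpts, here is a minimal sketch of the pattern they all share: construct a FullyConnectedLayer, collect its parameters, then call fprop on a list of parent outputs. This sketch is assembled from the excerpts below rather than taken from any single one; the InitCell initializers, the init_tparams helper, and their import paths are assumptions based on how the source projects use the cle library.

from collections import OrderedDict

import theano.tensor as T
from cle.cle.layers.feedforward import FullyConnectedLayer
# The next two imports are assumptions about where cle keeps these helpers;
# the excerpts only show how the objects are used, not where they come from.
from cle.cle.layers import InitCell
from cle.cle.utils import init_tparams

x = T.matrix('x')                   # symbolic input of shape (batch, 100)

init_W = InitCell('rand')           # weight initializer (assumed, as in the source repos)
init_b = InitCell('zeros')          # bias initializer (assumed)

h1 = FullyConnectedLayer(name='h1',
                         parent=['x'],      # name(s) of the parent node(s)
                         parent_dim=[100],  # dimensionality of each parent
                         nout=200,          # output dimensionality
                         unit='sigmoid',    # activation: 'sigmoid', 'tanh', 'softplus', ...
                         init_W=init_W,
                         init_b=init_b)

# Collect and wrap the layer's parameters, following the newer excerpts.
params = OrderedDict()
if h1.initialize() is not None:
    params.update(h1.initialize())
params = init_tparams(params)

# fprop takes a list of parent outputs (plus the params dict in the newer
# excerpts) and returns the layer's symbolic output.
h1_out = h1.fprop([x], params)

The older excerpts (Examples 5, 9, 11 and 15) use the other calling convention instead: node.initialize() with no returned dict, parameters gathered via flatten([node.get_params().values() for node in nodes]), and fprop([x]) without the params argument.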

Example 1: main

# Required import: from cle.cle.layers.feedforward import FullyConnectedLayer [as alias]
# Or: from cle.cle.layers.feedforward.FullyConnectedLayer import fprop [as alias]

#......... part of the code omitted here .........
                                    unit='softplus',
                                    cons=1e-4,
                                    init_W=init_W,
                                    init_b=init_b_sig)

    corr = FullyConnectedLayer(name='corr',
                                parent=['theta_1'],
                                parent_dim=[s2x_dim],
                                nout=1,
                                unit='tanh',
                                init_W=init_W,
                                init_b=init_b)

    binary = FullyConnectedLayer(name='binary',
                                parent=['theta_1'],
                                parent_dim=[s2x_dim],
                                nout=1,
                                unit='sigmoid',
                                init_W=init_W,
                                init_b=init_b)

    nodes = [rnn, x_1, theta_1, theta_mu, theta_sig, corr, binary]

    params = OrderedDict()

    for node in nodes:
        if node.initialize() is not None:
            params.update(node.initialize())

    params = init_tparams(params)

    s_0 = rnn.get_init_state(batch_size)

    x_1_temp = x_1.fprop([x], params)


    def inner_fn(x_t, s_tm1):

        s_t = rnn.fprop([[x_t], [s_tm1]], params)

        return s_t

    ((s_temp), updates) = theano.scan(fn=inner_fn,
                                      sequences=[x_1_temp],
                                      outputs_info=[s_0])

    for k, v in updates.iteritems():
        k.default_update = v

    s_temp = concatenate([s_0[None, :, :], s_temp[:-1]], axis=0)
    theta_1_temp = theta_1.fprop([s_temp], params)
    theta_mu_temp = theta_mu.fprop([theta_1_temp], params)
    theta_sig_temp = theta_sig.fprop([theta_1_temp], params)
    corr_temp = corr.fprop([theta_1_temp], params)
    binary_temp = binary.fprop([theta_1_temp], params)

    x_shape = x.shape
    x_in = x.reshape((x_shape[0]*x_shape[1], -1))
    theta_mu_in = theta_mu_temp.reshape((x_shape[0]*x_shape[1], -1))
    theta_sig_in = theta_sig_temp.reshape((x_shape[0]*x_shape[1], -1))
    corr_in = corr_temp.reshape((x_shape[0]*x_shape[1], -1))
    binary_in = binary_temp.reshape((x_shape[0]*x_shape[1], -1))

    recon = BiGauss(x_in, theta_mu_in, theta_sig_in, corr_in, binary_in)
    recon = recon.reshape((x_shape[0], x_shape[1]))
    recon = recon * mask
Developer: xzhang311, Project: nips2015_vrnn, Lines: 70, Source: rnn_gauss.py

Example 2: main

# Required import: from cle.cle.layers.feedforward import FullyConnectedLayer [as alias]
# Or: from cle.cle.layers.feedforward.FullyConnectedLayer import fprop [as alias]

#......... part of the code omitted here .........

    coeff = FullyConnectedLayer(name='coeff',
                                parent=['theta_4'],
                                parent_dim=[p_x_dim],
                                nout=k,
                                unit='softmax',
                                init_W=init_W,
                                init_b=init_b)

    nodes = [rnn,
             x_1, x_2, x_3, x_4,
             z_1, z_2, z_3, z_4,
             phi_1, phi_2, phi_3, phi_4, phi_mu, phi_sig,
             prior_1, prior_2, prior_3, prior_4, prior_mu, prior_sig,
             theta_1, theta_2, theta_3, theta_4, theta_mu, theta_sig, coeff]

    params = OrderedDict()
    for node in nodes:
        if node.initialize() is not None:
            params.update(node.initialize())
    params = init_tparams(params)

    step_count = sharedX(0, name='step_count')
    last_rnn = np.zeros((batch_size, rnn_dim*2), dtype=theano.config.floatX)
    rnn_tm1 = sharedX(last_rnn, name='rnn_tm1')
    shared_updates = OrderedDict()
    shared_updates[step_count] = step_count + 1

    s_0 = T.switch(T.eq(T.mod(step_count, reset_freq), 0),
                   rnn.get_init_state(batch_size), rnn_tm1)

    x_shape = x.shape
    x_in = x.reshape((x_shape[0]*x_shape[1], -1))
    x_1_in = x_1.fprop([x_in], params)
    x_2_in = x_2.fprop([x_1_in], params)
    x_3_in = x_3.fprop([x_2_in], params)
    x_4_in = x_4.fprop([x_3_in], params)
    x_4_in = x_4_in.reshape((x_shape[0], x_shape[1], -1))


    def inner_fn(x_t, s_tm1):

        phi_1_t = phi_1.fprop([x_t, s_tm1], params)
        phi_2_t = phi_2.fprop([phi_1_t], params)
        phi_3_t = phi_3.fprop([phi_2_t], params)
        phi_4_t = phi_4.fprop([phi_3_t], params)
        phi_mu_t = phi_mu.fprop([phi_4_t], params)
        phi_sig_t = phi_sig.fprop([phi_4_t], params)

        prior_1_t = prior_1.fprop([s_tm1], params)
        prior_2_t = prior_2.fprop([prior_1_t], params)
        prior_3_t = prior_3.fprop([prior_2_t], params)
        prior_4_t = prior_4.fprop([prior_3_t], params)
        prior_mu_t = prior_mu.fprop([prior_4_t], params)
        prior_sig_t = prior_sig.fprop([prior_4_t], params)

        z_t = Gaussian_sample(phi_mu_t, phi_sig_t)

        z_1_t = z_1.fprop([z_t], params)
        z_2_t = z_2.fprop([z_1_t], params)
        z_3_t = z_3.fprop([z_2_t], params)
        z_4_t = z_4.fprop([z_3_t], params)

        s_t = rnn.fprop([[x_t, z_4_t], [s_tm1]], params)

        return s_t, phi_mu_t, phi_sig_t, prior_mu_t, prior_sig_t, z_4_t
Developer: kastnerkyle, Project: nips2015_vrnn, Lines: 70, Source: vrnn_gmm.py

Example 3: flatten

# Required import: from cle.cle.layers.feedforward import FullyConnectedLayer [as alias]
# Or: from cle.cle.layers.feedforward.FullyConnectedLayer import fprop [as alias]
params = flatten([node.get_params().values() for node in nodes])

step_count = sharedX(0, name='step_count')
last_main_lstm = np.zeros((batch_size, main_lstm_dim*2), dtype=theano.config.floatX)
main_lstm_tm1 = sharedX(last_main_lstm, name='main_lstm_tm1')
update_list = [step_count, main_lstm_tm1]

step_count = T.switch(T.le(step_count, reset_freq), step_count + 1, 0)
s_0 = T.switch(T.or_(T.cast(T.eq(step_count, 0), 'int32'),
                     T.cast(T.eq(T.sum(main_lstm_tm1), 0.), 'int32')),
               main_lstm.get_init_state(batch_size), main_lstm_tm1)

x_shape = x.shape
x_in = x.reshape((x_shape[0]*x_shape[1], -1))
x_1_in = x_1.fprop([x_in])
x_2_in = x_2.fprop([x_1_in])
x_3_in = x_3.fprop([x_2_in])
x_4_in = x_4.fprop([x_3_in])
x_4_in = x_4_in.reshape((x_shape[0], x_shape[1], -1))


def inner_fn(x_t, s_tm1):

    phi_1_t = phi_1.fprop([x_t, s_tm1])
    phi_2_t = phi_2.fprop([phi_1_t])
    phi_3_t = phi_3.fprop([phi_2_t])
    phi_4_t = phi_4.fprop([phi_3_t])
    phi_mu_t = phi_mu.fprop([phi_4_t])
    phi_sig_t = phi_sig.fprop([phi_4_t])
Developer: anirudh9119, Project: SpeechSyn, Lines: 31, Source: m2.py

Example 4: main

# Required import: from cle.cle.layers.feedforward import FullyConnectedLayer [as alias]
# Or: from cle.cle.layers.feedforward.FullyConnectedLayer import fprop [as alias]

#......... part of the code omitted here .........
    )

    binary = FullyConnectedLayer(
        name="binary", parent=["theta_1"], parent_dim=[p_x_dim], nout=1, unit="sigmoid", init_W=init_W, init_b=init_b
    )

    nodes = [
        rnn,
        x_1,
        z_1,
        phi_1,
        phi_mu,
        phi_sig,
        prior_1,
        prior_mu,
        prior_sig,
        theta_1,
        theta_mu,
        theta_sig,
        corr,
        binary,
    ]

    params = OrderedDict()

    for node in nodes:
        if node.initialize() is not None:
            params.update(node.initialize())

    params = init_tparams(params)

    s_0 = rnn.get_init_state(batch_size)

    x_1_temp = x_1.fprop([x], params)

    def inner_fn(x_t, s_tm1):

        phi_1_t = phi_1.fprop([x_t, s_tm1], params)
        phi_mu_t = phi_mu.fprop([phi_1_t], params)
        phi_sig_t = phi_sig.fprop([phi_1_t], params)

        prior_1_t = prior_1.fprop([s_tm1], params)
        prior_mu_t = prior_mu.fprop([prior_1_t], params)
        prior_sig_t = prior_sig.fprop([prior_1_t], params)

        z_t = Gaussian_sample(phi_mu_t, phi_sig_t)
        z_1_t = z_1.fprop([z_t], params)

        s_t = rnn.fprop([[x_t, z_1_t], [s_tm1]], params)

        return s_t, phi_mu_t, phi_sig_t, prior_mu_t, prior_sig_t, z_1_t

    ((s_temp, phi_mu_temp, phi_sig_temp, prior_mu_temp, prior_sig_temp, z_1_temp), updates) = theano.scan(
        fn=inner_fn, sequences=[x_1_temp], outputs_info=[s_0, None, None, None, None, None]
    )

    for k, v in updates.iteritems():
        k.default_update = v

    s_temp = concatenate([s_0[None, :, :], s_temp[:-1]], axis=0)
    theta_1_temp = theta_1.fprop([z_1_temp, s_temp], params)
    theta_mu_temp = theta_mu.fprop([theta_1_temp], params)
    theta_sig_temp = theta_sig.fprop([theta_1_temp], params)
    corr_temp = corr.fprop([theta_1_temp], params)
    binary_temp = binary.fprop([theta_1_temp], params)
Developer: vseledkin, Project: nips2015_vrnn, Lines: 69, Source: vrnn_gauss.py

Example 5: flatten

# Required import: from cle.cle.layers.feedforward import FullyConnectedLayer [as alias]
# Or: from cle.cle.layers.feedforward.FullyConnectedLayer import fprop [as alias]
                             init_W=init_W,
                             init_b=init_b)


# You will fill in a list of nodes
nodes = [h1, h2, d1, d2, output]

# Initialize the nodes
for node in nodes:
    node.initialize()

# Collect parameters
params = flatten([node.get_params().values() for node in nodes])

# Build the Theano computational graph
h1_out = h1.fprop([x])
d1_out = d1.fprop([h1_out])
h2_out = h2.fprop([d1_out])
d2_out = d2.fprop([h2_out])
y_hat = output.fprop([d2_out])

# Compute the cost
cost = NllMulInd(y, y_hat).mean()
err = error(predict(y_hat), y)
cost.name = 'cross_entropy'
err.name = 'error_rate'

d1.set_mode(1)
d2.set_mode(1)
mn_h1_out = h1.fprop([mn_x])
mn_h2_out = h2.fprop([mn_h1_out])
Developer: npow, Project: cle, Lines: 33, Source: mnist_dropout.py

Example 6: init_tparams

# Required import: from cle.cle.layers.feedforward import FullyConnectedLayer [as alias]
# Or: from cle.cle.layers.feedforward.FullyConnectedLayer import fprop [as alias]
    if node.initialize() is not None:
        params.update(node.initialize())
params = init_tparams(params)

step_count = sharedX(0, name='step_count')
last_rnn = np.zeros((batch_size, rnn_dim*2), dtype=theano.config.floatX)
rnn_tm1 = sharedX(last_rnn, name='rnn_tm1')
shared_updates = OrderedDict()
shared_updates[step_count] = step_count + 1

s_0 = T.switch(T.eq(T.mod(step_count, reset_freq), 0),
               rnn.get_init_state(batch_size), rnn_tm1)

x_shape = x.shape
x_in = x.reshape((x_shape[0]*x_shape[1], -1))
x_1_in = x_1.fprop([x_in], params)
x_2_in = x_2.fprop([x_1_in], params)
x_3_in = x_3.fprop([x_2_in], params)
x_4_in = x_4.fprop([x_3_in], params)
x_4_in = x_4_in.reshape((x_shape[0], x_shape[1], -1))


def inner_fn(x_t, s_tm1):

    phi_1_t = phi_1.fprop([x_t, s_tm1], params)
    phi_2_t = phi_2.fprop([phi_1_t], params)
    phi_3_t = phi_3.fprop([phi_2_t], params)
    phi_4_t = phi_4.fprop([phi_3_t], params)
    phi_mu_t = phi_mu.fprop([phi_4_t], params)
    phi_sig_t = phi_sig.fprop([phi_4_t], params)
Developer: LEONOB2014, Project: nips2015_vrnn, Lines: 32, Source: vrnn_gmm.py

Example 7: Gaussian

# Required import: from cle.cle.layers.feedforward import FullyConnectedLayer [as alias]
# Or: from cle.cle.layers.feedforward.FullyConnectedLayer import fprop [as alias]
    return enc_t, dec_t, phi_mu_t, phi_sig_t

((enc_t, dec_t, phi_mu_t, phi_sig_t), updates) =\
    theano.scan(fn=inner_fn,
                sequences=[x, x_tm1],
                outputs_info=[enc_0, dec_0, None, None])

for k, v in updates.iteritems():
    k.default_update = v

encoder_tm1 = enc_t[-1]
decoder_tm1 = dec_t[-1]

dec_shape = dec_t.shape
dec_in = dec_t.reshape((dec_shape[0]*dec_shape[1], -1))
theta_mu_in = theta_mu.fprop([dec_in])
theta_sig_in = theta_sig.fprop([dec_in])

z_shape = phi_mu_t.shape
phi_mu_in = phi_mu_t.reshape((z_shape[0]*z_shape[1], -1))
phi_sig_in = phi_sig_t.reshape((z_shape[0]*z_shape[1], -1))
kl_in = kl.fprop([phi_mu_in, phi_sig_in])
kl_t = kl_in.reshape((z_shape[0], z_shape[1]))

recon = Gaussian(x_in, theta_mu_in, theta_sig_in)
recon = recon.reshape((x_shape[0], x_shape[1]))
recon_term = recon.mean()
kl_term = kl_t.mean()
nll_lower_bound = recon_term + kl_term
nll_lower_bound.name = 'nll_lower_bound'
recon_term.name = 'recon_term'
Developer: anirudh9119, Project: SpeechSyn, Lines: 33, Source: storn0_orig.py

Example 8: inner_fn

# Required import: from cle.cle.layers.feedforward import FullyConnectedLayer [as alias]
# Or: from cle.cle.layers.feedforward.FullyConnectedLayer import fprop [as alias]
update_list = [step_count, lstm_1_tm1, lstm_2_tm1, lstm_3_tm1]

step_count = T.switch(T.le(step_count, reset_freq), step_count + 1, 0)
s_1_0 = T.switch(T.or_(T.cast(T.eq(step_count, 0), 'int32'),
                       T.cast(T.eq(T.sum(lstm_1_tm1), 0.), 'int32')),
                 lstm_1.get_init_state(batch_size), lstm_1_tm1)
s_2_0 = T.switch(T.or_(T.cast(T.eq(step_count, 0), 'int32'),
                       T.cast(T.eq(T.sum(lstm_2_tm1), 0.), 'int32')),
                 lstm_2.get_init_state(batch_size), lstm_2_tm1)
s_3_0 = T.switch(T.or_(T.cast(T.eq(step_count, 0), 'int32'),
                       T.cast(T.eq(T.sum(lstm_3_tm1), 0.), 'int32')),
                 lstm_3.get_init_state(batch_size), lstm_3_tm1)

x_shape = x.shape
x_in = x.reshape((x_shape[0]*x_shape[1], -1))
x_1_in = x_1.fprop([x_in])
x_2_in = x_2.fprop([x_1_in])
x_3_in = x_3.fprop([x_2_in])
x_4_in = x_4.fprop([x_3_in])
x_5_in = x_5.fprop([x_4_in])
x_6_in = x_6.fprop([x_5_in])
x_6_in = x_6_in.reshape((x_shape[0], x_shape[1], -1))


def inner_fn(x_t, s_tm1):

    phi_1_t = phi_1.fprop([x_t, s_3_tm1])
    phi_2_t = phi_2.fprop([phi_1_t])
    phi_3_t = phi_3.fprop([phi_2_t])
    phi_4_t = phi_4.fprop([phi_3_t])
    phi_mu_t = phi_mu.fprop([phi_4_t])
Developer: anirudh9119, Project: SpeechSyn, Lines: 33, Source: deep_m3.py

Example 9: flatten

# Required import: from cle.cle.layers.feedforward import FullyConnectedLayer [as alias]
# Or: from cle.cle.layers.feedforward.FullyConnectedLayer import fprop [as alias]
    theta_4,
    theta_mu,
    theta_sig,
]

for node in nodes:
    node.initialize()

params = flatten([node.get_params().values() for node in nodes])

enc_0 = encoder.get_init_state(batch_size)
dec_0 = decoder.get_init_state(batch_size)

x_shape = x.shape
x_in = x.reshape((x_shape[0] * x_shape[1], -1))
x_1_in = x_1.fprop([x_in])
x_2_in = x_2.fprop([x_1_in])
x_3_in = x_3.fprop([x_2_in])
x_4_in = x_4.fprop([x_3_in])
x_4_in = x_4_in.reshape((x_shape[0], x_shape[1], -1))

x_tm1_shape = x_tm1.shape
x_in_tm1 = x_tm1.reshape((x_tm1_shape[0] * x_tm1_shape[1], -1))
x_1_in_tm1 = x_1.fprop([x_in_tm1])
x_2_in_tm1 = x_2.fprop([x_1_in_tm1])
x_3_in_tm1 = x_3.fprop([x_2_in_tm1])
x_4_in_tm1 = x_4.fprop([x_3_in_tm1])
x_4_in_tm1 = x_4_in_tm1.reshape((x_shape[0], x_shape[1], -1))


def inner_fn(x_t, x_tm1, enc_tm1, dec_tm1):
Developer: anirudh9119, Project: SpeechSyn, Lines: 33, Source: storn0_2.py

Example 10: OrderedDict

# Required import: from cle.cle.layers.feedforward import FullyConnectedLayer [as alias]
# Or: from cle.cle.layers.feedforward.FullyConnectedLayer import fprop [as alias]
                             init_b=init_b)


# You will fill in a list of nodes
nodes = [h1, output]

# Initialize the nodes
params = OrderedDict()
for node in nodes:
    params.update(node.initialize())
params = init_tparams(params)
nparams = add_noise_params(params, std_dev=std_dev)

# Build the Theano computational graph
d_x = inp_scale * dropout(x, p=inp_p)
h1_out = h1.fprop([d_x], nparams)
d1_out = int_scale * dropout(h1_out, p=int_p)
y_hat = output.fprop([d1_out], nparams)

# Compute the cost
cost = NllMulInd(y, y_hat).mean()
err = error(predict(y_hat), y)
cost.name = 'cross_entropy'
err.name = 'error_rate'

# Separate computational graph to compute monitoring values without
# considering the noising processes
m_h1_out = h1.fprop([x], params)
m_y_hat = output.fprop([m_h1_out], params)

m_cost = NllMulInd(y, m_y_hat).mean()
Developer: Beronx86, Project: cle, Lines: 33, Source: mnist_dropout.py

Example 11: flatten

# Required import: from cle.cle.layers.feedforward import FullyConnectedLayer [as alias]
# Or: from cle.cle.layers.feedforward.FullyConnectedLayer import fprop [as alias]
                               unit='linear',
                               init_W=init_W,
                               init_b=init_b)

nodes = [x_1, theta_mu]

for node in nodes:
    node.initialize()

params = flatten([node.get_params().values() for node in nodes])


x_shape = x.shape
x_in = x.reshape((x_shape[0]*x_shape[1], -1))

x_1_in = x_1.fprop([x_in])
theta_mu_in = theta_mu.fprop([x_1_in])

recon = 0.5*(x_in-theta_mu_in)**2
recon_term = recon.mean()
# TODO: what should be the reconstructed signal? theta_mu or sample?

spec_recon = spectral_magnitude_log_distance_error(x_in, theta_mu_in)
spec_recon_term = spec_recon.mean()
spec_recon_term.name = 'spec_recon_term'
cost = recon_term + spec_recon_term
recon_term.name = 'recon_term'
cost.name = 'cost'


"""
Developer: soroushmehr, Project: BP-FFT, Lines: 33, Source: test_cufft.py

Example 12: OrderedDict

# Required import: from cle.cle.layers.feedforward import FullyConnectedLayer [as alias]
# Or: from cle.cle.layers.feedforward.FullyConnectedLayer import fprop [as alias]
params = OrderedDict()
for node in nodes:
    if node.initialize() is not None:
        params.update(node.initialize())
params = init_tparams(params)

step_count = sharedX(0, name='step_count')
last_rnn = np.zeros((batch_size, rnn_dim*2), dtype=theano.config.floatX)
rnn_tm1 = sharedX(last_rnn, name='rnn_tm1')
shared_updates = OrderedDict()
shared_updates[step_count] = step_count + 1

s_0 = T.switch(T.eq(T.mod(step_count, reset_freq), 0),
               rnn.get_init_state(batch_size), rnn_tm1)

x_1_temp = x_1.fprop([x], params)
x_2_temp = x_2.fprop([x_1_temp], params)
x_3_temp = x_3.fprop([x_2_temp], params)
x_4_temp = x_4.fprop([x_3_temp], params)


def inner_fn(x_t, s_tm1):

    s_t = rnn.fprop([[x_t], [s_tm1]], params)

    return s_t

(s_temp, updates) = theano.scan(fn=inner_fn,
                                sequences=[x_4_temp],
                                outputs_info=[s_0])
Developer: LEONOB2014, Project: nips2015_vrnn, Lines: 32, Source: rnn_gmm.py

Example 13: main

# Required import: from cle.cle.layers.feedforward import FullyConnectedLayer [as alias]
# Or: from cle.cle.layers.feedforward.FullyConnectedLayer import fprop [as alias]

#......... part of the code omitted here .........
        phi_4,
        phi_mu,
        phi_sig,
        prior_1,
        prior_2,
        prior_3,
        prior_4,
        prior_mu,
        prior_sig,
        theta_1,
        theta_2,
        theta_3,
        theta_4,
        theta_mu,
        theta_sig,
    ]

    params = OrderedDict()

    for node in nodes:
        if node.initialize() is not None:
            params.update(node.initialize())

    params = init_tparams(params)

    step_count = sharedX(0, name="step_count")
    last_rnn = np.zeros((batch_size, rnn_dim * 2), dtype=theano.config.floatX)
    rnn_tm1 = sharedX(last_rnn, name="rnn_tm1")
    shared_updates = OrderedDict()
    shared_updates[step_count] = step_count + 1

    s_0 = T.switch(T.eq(T.mod(step_count, reset_freq), 0), rnn.get_init_state(batch_size), rnn_tm1)

    x_1_temp = x_1.fprop([x], params)
    x_2_temp = x_2.fprop([x_1_temp], params)
    x_3_temp = x_3.fprop([x_2_temp], params)
    x_4_temp = x_4.fprop([x_3_temp], params)

    def inner_fn(x_t, s_tm1):

        phi_1_t = phi_1.fprop([x_t, s_tm1], params)
        phi_2_t = phi_2.fprop([phi_1_t], params)
        phi_3_t = phi_3.fprop([phi_2_t], params)
        phi_4_t = phi_4.fprop([phi_3_t], params)
        phi_mu_t = phi_mu.fprop([phi_4_t], params)
        phi_sig_t = phi_sig.fprop([phi_4_t], params)

        prior_1_t = prior_1.fprop([s_tm1], params)
        prior_2_t = prior_2.fprop([prior_1_t], params)
        prior_3_t = prior_3.fprop([prior_2_t], params)
        prior_4_t = prior_4.fprop([prior_3_t], params)
        prior_mu_t = prior_mu.fprop([prior_4_t], params)
        prior_sig_t = prior_sig.fprop([prior_4_t], params)

        z_t = Gaussian_sample(phi_mu_t, phi_sig_t)

        z_1_t = z_1.fprop([z_t], params)
        z_2_t = z_2.fprop([z_1_t], params)
        z_3_t = z_3.fprop([z_2_t], params)
        z_4_t = z_4.fprop([z_3_t], params)

        s_t = rnn.fprop([[x_t, z_4_t], [s_tm1]], params)

        return s_t, phi_mu_t, phi_sig_t, prior_mu_t, prior_sig_t, z_4_t, z_t

    ((s_temp, phi_mu_temp, phi_sig_temp, prior_mu_temp, prior_sig_temp, z_4_temp, z_t), updates) = theano.scan(
Developer: szcom, Project: nips2015_vrnn, Lines: 70, Source: vrnn_gauss_alt_nll.py

Example 14: OrderedDict

# Required import: from cle.cle.layers.feedforward import FullyConnectedLayer [as alias]
# Or: from cle.cle.layers.feedforward.FullyConnectedLayer import fprop [as alias]
params = OrderedDict()
for node in nodes:
    if node.initialize() is not None:
        params.update(node.initialize())
params = init_tparams(params)

step_count = sharedX(0, name='step_count')
last_rnn = np.zeros((batch_size, rnn_dim*2), dtype=theano.config.floatX)
rnn_tm1 = sharedX(last_rnn, name='rnn_tm1')
shared_updates = OrderedDict()
shared_updates[step_count] = step_count + 1

s_0 = T.switch(T.eq(T.mod(step_count, reset_freq), 0),
               rnn.get_init_state(batch_size), rnn_tm1)

x_1_temp = x_1.fprop([x], params)
x_2_temp = x_2.fprop([x_1_temp], params)
x_3_temp = x_3.fprop([x_2_temp], params)
x_4_temp = x_4.fprop([x_3_temp], params)


def inner_fn(x_t, s_tm1):

    phi_1_t = phi_1.fprop([x_t, s_tm1], params)
    phi_2_t = phi_2.fprop([phi_1_t], params)
    phi_3_t = phi_3.fprop([phi_2_t], params)
    phi_4_t = phi_4.fprop([phi_3_t], params)
    phi_mu_t = phi_mu.fprop([phi_4_t], params)
    phi_sig_t = phi_sig.fprop([phi_4_t], params)

    prior_1_t = prior_1.fprop([s_tm1], params)
Developer: LEONOB2014, Project: nips2015_vrnn, Lines: 33, Source: vrnn_gauss.py

Example 15: flatten

# Required import: from cle.cle.layers.feedforward import FullyConnectedLayer [as alias]
# Or: from cle.cle.layers.feedforward.FullyConnectedLayer import fprop [as alias]
                            unit='softmax',
                            init_W=init_W,
                            init_b=init_b)

nodes = [main_lstm,
         x_1, x_2, x_3, x_4,
         theta_1, theta_2, theta_3, theta_4, theta_mu, theta_sig, coeff]

for node in nodes:
    node.initialize()

params = flatten([node.get_params().values() for node in nodes])

x_shape = x.shape
x_in = x.reshape((x_shape[0]*x_shape[1], -1))
x_1_in = x_1.fprop([x_in])
x_2_in = x_2.fprop([x_1_in])
x_3_in = x_3.fprop([x_2_in])
x_4_in = x_4.fprop([x_3_in])
x_4_in = x_4_in.reshape((x_shape[0], x_shape[1], -1))
s_0 = main_lstm.get_init_state(batch_size)


def inner_fn(x_t, s_tm1):

    s_t = main_lstm.fprop([[x_t], [s_tm1]])

    return s_t

(s_t, updates) = theano.scan(fn=inner_fn,
                             sequences=[x_4_in],
Developer: anirudh9119, Project: SpeechSyn, Lines: 33, Source: m1.py


Note: the cle.cle.layers.feedforward.FullyConnectedLayer.fprop examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, who retain copyright of the source code; consult the corresponding project's license before redistributing or reusing it. Do not republish without permission.