This page collects typical usage examples of the Python class cle.cle.layers.feedforward.FullyConnectedLayer. If you are wondering what the FullyConnectedLayer class is for or how to use it, the selected class code examples below should help.
A total of 15 FullyConnectedLayer code examples are shown below, sorted by popularity by default.
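Before the individual examples, here is a minimal sketch of the construction pattern that recurs throughout them. The layer dimensions are illustrative only, and the import path used for InitCell is an assumption based on the cle package layout; the FullyConnectedLayer import matches the class documented here.
# Minimal sketch of the recurring pattern (illustrative dimensions;
# the InitCell import path is assumed from the cle package layout).
from cle.cle.layers import InitCell
from cle.cle.layers.feedforward import FullyConnectedLayer

init_W = InitCell('rand')    # random weight initialization
init_b = InitCell('zeros')   # zero bias initialization

# A fully connected layer mapping a 200-dimensional parent node 'x_t'
# to a 500-dimensional ReLU output.
x_1 = FullyConnectedLayer(name='x_1',
                          parent=['x_t'],
                          parent_dim=[200],
                          nout=500,
                          unit='relu',
                          init_W=init_W,
                          init_b=init_b)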
Example 1: main
def main(args):
trial = int(args['trial'])
pkl_name = 'rnn_gauss_%d' % trial
channel_name = 'valid_nll'
data_path = args['data_path']
save_path = args['save_path']
monitoring_freq = int(args['monitoring_freq'])
epoch = int(args['epoch'])
batch_size = int(args['batch_size'])
x_dim = int(args['x_dim'])
z_dim = int(args['z_dim'])
rnn_dim = int(args['rnn_dim'])
lr = float(args['lr'])
debug = int(args['debug'])
print "trial no. %d" % trial
print "batch size %d" % batch_size
print "learning rate %f" % lr
print "saving pkl file '%s'" % pkl_name
print "to the save path '%s'" % save_path
x2s_dim = 340
s2x_dim = 340
target_dim = x_dim - 1
model = Model()
train_data = IAMOnDB(name='train',
prep='normalize',
cond=False,
path=data_path)
X_mean = train_data.X_mean
X_std = train_data.X_std
valid_data = IAMOnDB(name='valid',
prep='normalize',
cond=False,
path=data_path,
X_mean=X_mean,
X_std=X_std)
init_W = InitCell('rand')
init_U = InitCell('ortho')
init_b = InitCell('zeros')
init_b_sig = InitCell('const', mean=0.6)
x, mask = train_data.theano_vars()
if debug:
x.tag.test_value = np.zeros((15, batch_size, x_dim), dtype=np.float32)
temp = np.ones((15, batch_size), dtype=np.float32)
temp[:, -2:] = 0.
mask.tag.test_value = temp
x_1 = FullyConnectedLayer(name='x_1',
parent=['x_t'],
parent_dim=[x_dim],
nout=x2s_dim,
unit='relu',
init_W=init_W,
init_b=init_b)
rnn = LSTM(name='rnn',
parent=['x_1'],
parent_dim=[x2s_dim],
nout=rnn_dim,
unit='tanh',
init_W=init_W,
init_U=init_U,
init_b=init_b)
theta_1 = FullyConnectedLayer(name='theta_1',
parent=['s_tm1'],
parent_dim=[rnn_dim],
nout=s2x_dim,
unit='relu',
init_W=init_W,
init_b=init_b)
theta_mu = FullyConnectedLayer(name='theta_mu',
parent=['theta_1'],
parent_dim=[s2x_dim],
nout=target_dim,
unit='linear',
init_W=init_W,
init_b=init_b)
theta_sig = FullyConnectedLayer(name='theta_sig',
parent=['theta_1'],
parent_dim=[s2x_dim],
nout=target_dim,
unit='softplus',
cons=1e-4,
init_W=init_W,
init_b=init_b_sig)
# ... (the rest of this example is omitted) ...
Example 2: main
def main(args):
trial = int(args['trial'])
pkl_name = 'vrnn_gmm_%d' % trial
channel_name = 'valid_nll_upper_bound'
data_path = args['data_path']
save_path = args['save_path']
monitoring_freq = int(args['monitoring_freq'])
force_saving_freq = int(args['force_saving_freq'])
reset_freq = int(args['reset_freq'])
epoch = int(args['epoch'])
batch_size = int(args['batch_size'])
m_batch_size = int(args['m_batch_size'])
x_dim = int(args['x_dim'])
z_dim = int(args['z_dim'])
rnn_dim = int(args['rnn_dim'])
k = int(args['num_k'])
lr = float(args['lr'])
debug = int(args['debug'])
print "trial no. %d" % trial
print "batch size %d" % batch_size
print "learning rate %f" % lr
print "saving pkl file '%s'" % pkl_name
print "to the save path '%s'" % save_path
q_z_dim = 500
p_z_dim = 500
p_x_dim = 500
x2s_dim = 500
z2s_dim = 500
target_dim = x_dim * k
file_name = 'blizzard_unseg_tbptt'
normal_params = np.load(data_path + file_name + '_normal.npz')
X_mean = normal_params['X_mean']
X_std = normal_params['X_std']
model = Model()
train_data = Blizzard_tbptt(name='train',
path=data_path,
frame_size=x_dim,
file_name=file_name,
X_mean=X_mean,
X_std=X_std)
valid_data = Blizzard_tbptt(name='valid',
path=data_path,
frame_size=x_dim,
file_name=file_name,
X_mean=X_mean,
X_std=X_std)
x = train_data.theano_vars()
m_x = valid_data.theano_vars()
if debug:
x.tag.test_value = np.zeros((15, batch_size, x_dim), dtype=theano.config.floatX)
m_x.tag.test_value = np.zeros((15, m_batch_size, x_dim), dtype=theano.config.floatX)
init_W = InitCell('rand')
init_U = InitCell('ortho')
init_b = InitCell('zeros')
init_b_sig = InitCell('const', mean=0.6)
x_1 = FullyConnectedLayer(name='x_1',
parent=['x_t'],
parent_dim=[x_dim],
nout=x2s_dim,
unit='relu',
init_W=init_W,
init_b=init_b)
x_2 = FullyConnectedLayer(name='x_2',
parent=['x_1'],
parent_dim=[x2s_dim],
nout=x2s_dim,
unit='relu',
init_W=init_W,
init_b=init_b)
x_3 = FullyConnectedLayer(name='x_3',
parent=['x_2'],
parent_dim=[x2s_dim],
nout=x2s_dim,
unit='relu',
init_W=init_W,
init_b=init_b)
x_4 = FullyConnectedLayer(name='x_4',
parent=['x_3'],
parent_dim=[x2s_dim],
nout=x2s_dim,
unit='relu',
init_W=init_W,
init_b=init_b)
z_1 = FullyConnectedLayer(name='z_1',
# ... (the rest of this example is omitted) ...
Example 3: InitCell
init_W = InitCell('rand')
init_U = InitCell('ortho')
init_b = InitCell('zeros')
init_b_sig = InitCell('const', mean=0.6)
x = train_data.theano_vars()
mn_x = valid_data.theano_vars()
if debug:
x.tag.test_value = np.zeros((15, batch_size, frame_size), dtype=theano.config.floatX)
mn_x.tag.test_value = np.zeros((15, mn_batch_size, frame_size), dtype=theano.config.floatX)
x_1 = FullyConnectedLayer(name='x_1',
parent=['x_t'],
parent_dim=[frame_size],
nout=x2s_dim,
unit='relu',
init_W=init_W,
init_b=init_b)
x_2 = FullyConnectedLayer(name='x_2',
parent=['x_1'],
parent_dim=[x2s_dim],
nout=x2s_dim,
unit='relu',
init_W=init_W,
init_b=init_b)
x_3 = FullyConnectedLayer(name='x_3',
parent=['x_2'],
parent_dim=[x2s_dim],
Example 4: main
def main(args):
trial = int(args["trial"])
pkl_name = "vrnn_gauss_%d" % trial
channel_name = "valid_nll_upper_bound"
data_path = args["data_path"]
save_path = args["save_path"]
monitoring_freq = int(args["monitoring_freq"])
epoch = int(args["epoch"])
batch_size = int(args["batch_size"])
x_dim = int(args["x_dim"])
z_dim = int(args["z_dim"])
rnn_dim = int(args["rnn_dim"])
lr = float(args["lr"])
debug = int(args["debug"])
print "trial no. %d" % trial
print "batch size %d" % batch_size
print "learning rate %f" % lr
print "saving pkl file '%s'" % pkl_name
print "to the save path '%s'" % save_path
q_z_dim = 150
p_z_dim = 150
p_x_dim = 250
x2s_dim = 250
z2s_dim = 150
target_dim = x_dim - 1
model = Model()
train_data = IAMOnDB(name="train", prep="normalize", cond=False, path=data_path)
X_mean = train_data.X_mean
X_std = train_data.X_std
valid_data = IAMOnDB(name="valid", prep="normalize", cond=False, path=data_path, X_mean=X_mean, X_std=X_std)
init_W = InitCell("rand")
init_U = InitCell("ortho")
init_b = InitCell("zeros")
init_b_sig = InitCell("const", mean=0.6)
x, mask = train_data.theano_vars()
if debug:
x.tag.test_value = np.zeros((15, batch_size, x_dim), dtype=np.float32)
temp = np.ones((15, batch_size), dtype=np.float32)
temp[:, -2:] = 0.0
mask.tag.test_value = temp
x_1 = FullyConnectedLayer(
name="x_1", parent=["x_t"], parent_dim=[x_dim], nout=x2s_dim, unit="relu", init_W=init_W, init_b=init_b
)
z_1 = FullyConnectedLayer(
name="z_1", parent=["z_t"], parent_dim=[z_dim], nout=z2s_dim, unit="relu", init_W=init_W, init_b=init_b
)
rnn = LSTM(
name="rnn",
parent=["x_1", "z_1"],
parent_dim=[x2s_dim, z2s_dim],
nout=rnn_dim,
unit="tanh",
init_W=init_W,
init_U=init_U,
init_b=init_b,
)
phi_1 = FullyConnectedLayer(
name="phi_1",
parent=["x_1", "s_tm1"],
parent_dim=[x2s_dim, rnn_dim],
nout=q_z_dim,
unit="relu",
init_W=init_W,
init_b=init_b,
)
phi_mu = FullyConnectedLayer(
name="phi_mu", parent=["phi_1"], parent_dim=[q_z_dim], nout=z_dim, unit="linear", init_W=init_W, init_b=init_b
)
phi_sig = FullyConnectedLayer(
name="phi_sig",
parent=["phi_1"],
parent_dim=[q_z_dim],
nout=z_dim,
unit="softplus",
cons=1e-4,
init_W=init_W,
init_b=init_b_sig,
)
prior_1 = FullyConnectedLayer(
name="prior_1", parent=["s_tm1"], parent_dim=[rnn_dim], nout=p_z_dim, unit="relu", init_W=init_W, init_b=init_b
)
# ... (the rest of this example is omitted) ...
Example 5: InitCell
init_b = InitCell('zeros')
# Define nodes: objects
x, y = train_data.theano_vars()
mn_x, mn_y = valid_data.theano_vars()
# You must use THEANO_FLAGS="compute_test_value=raise" python -m ipdb
if debug:
x.tag.test_value = np.zeros((batch_size, 784), dtype=np.float32)
y.tag.test_value = np.zeros((batch_size, 1), dtype=np.float32)
mn_x.tag.test_value = np.zeros((batch_size, 784), dtype=np.float32)
mn_y.tag.test_value = np.zeros((batch_size, 1), dtype=np.float32)
h1 = FullyConnectedLayer(name='h1',
parent=['x'],
parent_dim=[784],
nout=1000,
unit='relu',
init_W=init_W,
init_b=init_b)
d1 = DropoutLayer(name='d1', parent=['h1'], nout=1000)
h2 = FullyConnectedLayer(name='h2',
parent=['d1'],
parent_dim=[1000],
nout=1000,
unit='relu',
init_W=init_W,
init_b=init_b)
d2 = DropoutLayer(name='d2', parent=['h2'], nout=1000)
Example 6: InitCell
X_std=X_std)
x = train_data.theano_vars()
if debug:
x.tag.test_value = np.zeros((15, batch_size, frame_size), dtype=theano.config.floatX)
init_W = InitCell('rand')
init_U = InitCell('ortho')
init_b = InitCell('zeros')
init_b_sig = InitCell('const', mean=0.6)
x_1 = FullyConnectedLayer(name='x_1',
parent=['x_t'],
parent_dim=[frame_size],
nout=x2s_dim,
unit='relu',
init_W=init_W,
init_b=init_b)
x_2 = FullyConnectedLayer(name='x_2',
parent=['x_1'],
parent_dim=[x2s_dim],
nout=x2s_dim,
unit='relu',
init_W=init_W,
init_b=init_b)
x_3 = FullyConnectedLayer(name='x_3',
parent=['x_2'],
parent_dim=[x2s_dim],
Example 7: PriorLayer
parent=['phi_mu', 'phi_sig'],
parent_dim=[latent_size, latent_size],
use_sample=1,
num_sample=1,
nout=latent_size)
kl = PriorLayer(name='kl',
parent=['phi_mu', 'phi_sig'],
parent_dim=[latent_size, latent_size],
use_sample=0,
nout=latent_size)
theta_mu = FullyConnectedLayer(name='theta_mu',
parent=['dec_t'],
parent_dim=[decoder_dim],
nout=target_size,
unit='linear',
init_W=init_W,
init_b=init_b)
theta_sig = FullyConnectedLayer(name='theta_sig',
parent=['dec_t'],
parent_dim=[decoder_dim],
nout=target_size,
unit='softplus',
cons=1e-4,
init_W=init_W,
init_b=init_b_sig)
nodes = [encoder, decoder, prior, kl,
phi_mu, phi_sig, theta_mu, theta_sig]
Example 8: InitCell
init_W = InitCell('rand')
init_U = InitCell('ortho')
init_b = InitCell('zeros')
#init_b_sig = InitCell('const', mean=0.6)
init_b_sig = InitCell('const')
x = train_data.theano_vars()
mn_x = valid_data.theano_vars()
if debug:
x.tag.test_value = np.zeros((15, batch_size, frame_size), dtype=np.float32)
mn_x.tag.test_value = np.zeros((15, mn_batch_size, frame_size), dtype=np.float32)
x_1 = FullyConnectedLayer(name='x_1',
parent=['x_t'],
parent_dim=[frame_size],
nout=x2s_dim,
unit='relu',
init_W=init_W,
init_b=init_b)
x_2 = FullyConnectedLayer(name='x_2',
parent=['x_1'],
parent_dim=[x2s_dim],
nout=x2s_dim,
unit='relu',
init_W=init_W,
init_b=init_b)
x_3 = FullyConnectedLayer(name='x_3',
parent=['x_2'],
parent_dim=[x2s_dim],
Example 9: InitCell
init_W = InitCell("rand")
init_U = InitCell("ortho")
init_b = InitCell("zeros")
init_b_sig = InitCell("const", mean=0.6)
x, x_mask = train_data.theano_vars()
if debug:
x.tag.test_value = np.zeros((15, batch_size, frame_size), dtype=np.float32)
temp = np.ones((15, batch_size), dtype=np.float32)
temp[:, -2:] = 0.0
x_mask.tag.test_value = temp
x_tm1 = T.concatenate([T.zeros((1, x.shape[1], x.shape[2])), x[:-1]], axis=0)
x_tm1.name = "x_tm1"
x_1 = FullyConnectedLayer(
name="x_1", parent=["x_t"], parent_dim=[frame_size], nout=x2s_dim, unit="relu", init_W=init_W, init_b=init_b
)
x_2 = FullyConnectedLayer(
name="x_2", parent=["x_1"], parent_dim=[x2s_dim], nout=x2s_dim, unit="relu", init_W=init_W, init_b=init_b
)
x_3 = FullyConnectedLayer(
name="x_3", parent=["x_2"], parent_dim=[x2s_dim], nout=x2s_dim, unit="relu", init_W=init_W, init_b=init_b
)
x_4 = FullyConnectedLayer(
name="x_4", parent=["x_3"], parent_dim=[x2s_dim], nout=x2s_dim, unit="relu", init_W=init_W, init_b=init_b
)
z_1 = FullyConnectedLayer(
Example 10: InitCell
# Define nodes: objects
x, y = train_data.theano_vars()
# You must use THEANO_FLAGS="compute_test_value=raise" python -m ipdb
if debug:
x.tag.test_value = np.zeros((batch_size, 784), dtype=np.float32)
y.tag.test_value = np.zeros((batch_size, 1), dtype=np.float32)
# Choose the random initialization method
init_W = InitCell('rand')
init_b = InitCell('zeros')
h1 = FullyConnectedLayer(name='h1',
parent=['x'],
parent_dim=[784],
nout=1000,
unit='relu',
init_W=init_W,
init_b=init_b)
output = FullyConnectedLayer(name='output',
parent=['h1'],
parent_dim=[1000],
nout=10,
unit='softmax',
init_W=init_W,
init_b=init_b)
# You will fill in a list of nodes
nodes = [h1, output]
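After the nodes list is filled in, these examples typically initialize each node and flatten all parameters into one list for the optimizer, as Example 15 below does. A minimal sketch of that follow-up, assuming flatten lives in cle's utils module:
# Sketch of the usual follow-up step (see Example 15);
# the flatten import path is an assumption about cle's utils module.
from cle.cle.utils import flatten

for node in nodes:
    node.initialize()          # allocate each node's weight and bias parameters

# Collect every node's parameters into one flat list for the optimizer.
params = flatten([node.get_params().values() for node in nodes])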
Example 11: InitCell
batch_size = mn_batch_size = 2
init_W = InitCell('rand', low=-0.5, high=0.5)
init_U = InitCell('ortho')
init_b = InitCell('zeros')
init_b_sig = InitCell('const', mean=0.6)
x = T.tensor3('x', dtype=theano.config.floatX)
x.tag.test_value = np.random.rand(2, batch_size, frame_size).astype(theano.config.floatX)
epsilonij = 0.0001
x_1 = FullyConnectedLayer(name='x_1',
parent=['x'],
parent_dim=[frame_size],
nout=150,
unit='relu',
init_W=init_W,
init_b=init_b)
theta_mu = FullyConnectedLayer(name='theta_mu',
parent=['x_1'],
parent_dim=[150],
nout=200,
unit='linear',
init_W=init_W,
init_b=init_b)
nodes = [x_1, theta_mu]
for node in nodes:
Example 12: main
def main(args):
trial = int(args["trial"])
pkl_name = "vrnn_gauss_%d" % trial
channel_name = "valid_nll_upper_bound"
data_path = args["data_path"]
save_path = args["save_path"]
data_path = os.path.expanduser(args["data_path"])
save_path = os.path.expanduser(args["save_path"])
monitoring_freq = int(args["monitoring_freq"])
force_saving_freq = int(args["force_saving_freq"])
reset_freq = int(args["reset_freq"])
epoch = int(args["epoch"])
batch_size = int(args["batch_size"])
m_batch_size = int(args["m_batch_size"])
x_dim = int(args["x_dim"])
z_dim = int(args["z_dim"])
rnn_dim = int(args["rnn_dim"])
lr = float(args["lr"])
debug = int(args["debug"])
print "trial no. %d" % trial
print "batch size %d" % batch_size
print "learning rate %f" % lr
print "saving pkl file '%s'" % pkl_name
print "to the save path '%s'" % save_path
q_z_dim = 500
p_z_dim = 500
p_x_dim = 600
x2s_dim = 600
z2s_dim = 500
target_dim = x_dim
file_name = "blizzard_tbptt"
normal_params = np.load(data_path + file_name + "_normal.npz")
X_mean = normal_params["X_mean"]
X_std = normal_params["X_std"]
model = Model()
train_data = Blizzard_tbptt(
name="train", path=data_path, frame_size=x_dim, file_name=file_name, X_mean=X_mean, X_std=X_std
)
valid_data = Blizzard_tbptt(
name="valid", path=data_path, frame_size=x_dim, file_name=file_name, X_mean=X_mean, X_std=X_std
)
x = train_data.theano_vars()
m_x = valid_data.theano_vars()
if debug:
x.tag.test_value = np.zeros((15, batch_size, x_dim), dtype=theano.config.floatX)
m_x.tag.test_value = np.zeros((15, m_batch_size, x_dim), dtype=theano.config.floatX)
init_W = InitCell("rand")
init_U = InitCell("ortho")
init_b = InitCell("zeros")
init_b_sig = InitCell("const", mean=0.6)
x_1 = FullyConnectedLayer(
name="x_1", parent=["x_t"], parent_dim=[x_dim], nout=x2s_dim, unit="relu", init_W=init_W, init_b=init_b
)
x_2 = FullyConnectedLayer(
name="x_2", parent=["x_1"], parent_dim=[x2s_dim], nout=x2s_dim, unit="relu", init_W=init_W, init_b=init_b
)
x_3 = FullyConnectedLayer(
name="x_3", parent=["x_2"], parent_dim=[x2s_dim], nout=x2s_dim, unit="relu", init_W=init_W, init_b=init_b
)
x_4 = FullyConnectedLayer(
name="x_4", parent=["x_3"], parent_dim=[x2s_dim], nout=x2s_dim, unit="relu", init_W=init_W, init_b=init_b
)
z_1 = FullyConnectedLayer(
name="z_1", parent=["z_t"], parent_dim=[z_dim], nout=z2s_dim, unit="relu", init_W=init_W, init_b=init_b
)
z_2 = FullyConnectedLayer(
name="z_2", parent=["z_1"], parent_dim=[z2s_dim], nout=z2s_dim, unit="relu", init_W=init_W, init_b=init_b
)
z_3 = FullyConnectedLayer(
name="z_3", parent=["z_2"], parent_dim=[z2s_dim], nout=z2s_dim, unit="relu", init_W=init_W, init_b=init_b
)
z_4 = FullyConnectedLayer(
name="z_4", parent=["z_3"], parent_dim=[z2s_dim], nout=z2s_dim, unit="relu", init_W=init_W, init_b=init_b
)
rnn = LSTM(
name="rnn",
parent=["x_4", "z_4"],
parent_dim=[x2s_dim, z2s_dim],
nout=rnn_dim,
unit="tanh",
init_W=init_W,
# ... (the rest of this example is omitted) ...
Example 13: InitCell
init_W = InitCell('rand')
init_U = InitCell('ortho')
init_b = InitCell('zeros')
init_b_sig = InitCell('const', mean=0.6)
x, x_mask = train_data.theano_vars()
if debug:
x.tag.test_value = np.zeros((15, batch_size, frame_size), dtype=np.float32)
temp = np.ones((15, batch_size), dtype=np.float32)
temp[:, -2:] = 0.
x_mask.tag.test_value = temp
x_1 = FullyConnectedLayer(name='x_1',
parent=['x_t'],
parent_dim=[frame_size],
nout=x2s_dim,
unit='relu',
init_W=init_W,
init_b=init_b)
x_2 = FullyConnectedLayer(name='x_2',
parent=['x_1'],
parent_dim=[x2s_dim],
nout=x2s_dim,
unit='relu',
init_W=init_W,
init_b=init_b)
x_3 = FullyConnectedLayer(name='x_3',
parent=['x_2'],
parent_dim=[x2s_dim],
Example 14: InitCell
init_b = InitCell('zeros')
init_b_sig = InitCell('const', mean=0.6)
x, x_mask = train_data.theano_vars()
if debug:
x.tag.test_value = np.zeros((15, batch_size, frame_size), dtype=np.float32)
temp = np.ones((15, batch_size), dtype=np.float32)
temp[:, -2:] = 0.
x_mask.tag.test_value = temp
x_tm1 = T.concatenate([T.zeros((1, x.shape[1], x.shape[2])), x[:-1]], axis=0)
x_tm1.name = 'x_tm1'
x_1 = FullyConnectedLayer(name='x_1',
parent=['x_t'],
parent_dim=[frame_size],
nout=x2s_dim,
unit='relu',
init_W=init_W,
init_b=init_b)
x_2 = FullyConnectedLayer(name='x_2',
parent=['x_1'],
parent_dim=[x2s_dim],
nout=x2s_dim,
unit='relu',
init_W=init_W,
init_b=init_b)
x_3 = FullyConnectedLayer(name='x_3',
parent=['x_2'],
parent_dim=[x2s_dim],
Example 15: GFLSTM
h3 = GFLSTM(name='h3',
parent=['x', 'h2'],
parent_dim=[205, 200],
recurrent=['h1', 'h2'],
recurrent_dim=[200, 200],
nout=200,
unit='tanh',
init_W=init_W,
init_U=init_U,
init_b=init_b)
output = FullyConnectedLayer(name='output',
parent=['h1', 'h2', 'h3'],
parent_dim=[200, 200, 200],
nout=205,
unit='softmax',
init_W=init_W,
init_b=init_b)
nodes = [h1, h2, h3, output]
for node in nodes:
node.initialize()
params = flatten([node.get_params().values() for node in nodes])
step_count = sharedX(0, name='step_count')
last_h = np.zeros((batch_size, 400), dtype=np.float32)
h1_tm1 = sharedX(last_h, name='h1_tm1')
h2_tm1 = sharedX(last_h, name='h2_tm1')